xref: /linux/drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c (revision ca220141fa8ebae09765a242076b2b77338106b0)
1 /*
2  * Copyright 2025 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/delay.h>
24 #include <linux/kernel.h>
25 #include <linux/firmware.h>
26 #include <linux/module.h>
27 #include <linux/pci.h>
28 #include "amdgpu.h"
29 #include "amdgpu_gfx.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_smu.h"
32 #include "amdgpu_atomfirmware.h"
33 #include "amdgpu_userq_fence.h"
34 #include "imu_v12_1.h"
35 #include "soc_v1_0.h"
36 #include "gfx_v12_1_pkt.h"
37 
38 #include "gc/gc_12_1_0_offset.h"
39 #include "gc/gc_12_1_0_sh_mask.h"
40 #include "soc24_enum.h"
41 #include "ivsrcid/gfx/irqsrcs_gfx_12_1_0.h"
42 
43 #include "soc15.h"
44 #include "clearstate_gfx12.h"
45 #include "v12_structs.h"
46 #include "gfx_v12_1.h"
47 #include "mes_v12_1.h"
48 
49 #define GFX12_MEC_HPD_SIZE	2048
50 #define NUM_SIMD_PER_CU_GFX12_1	4
51 
52 #define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
53 
54 #define regCP_HQD_EOP_CONTROL_DEFAULT                                             0x00000000
55 #define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT                                     0x00000000
56 #define regCP_MQD_CONTROL_DEFAULT                                                 0x00000100
57 #define regCP_HQD_PQ_CONTROL_DEFAULT                                              0x00308509
58 #define regCP_HQD_PQ_RPTR_DEFAULT                                                 0x00000000
59 #define regCP_HQD_PERSISTENT_STATE_DEFAULT                                        0x0ae06301
60 #define regCP_HQD_IB_CONTROL_DEFAULT                                              0x00100000
61 
62 MODULE_FIRMWARE("amdgpu/gc_12_1_0_mec.bin");
63 MODULE_FIRMWARE("amdgpu/gc_12_1_0_rlc.bin");
64 
65 #define SH_MEM_ALIGNMENT_MODE_UNALIGNED_GFX12_1_0	0x00000001
66 #define DEFAULT_SH_MEM_CONFIG \
67 	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
68 	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED_GFX12_1_0 << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
69 	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
70 
71 static void gfx_v12_1_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id);
72 static void gfx_v12_1_set_ring_funcs(struct amdgpu_device *adev);
73 static void gfx_v12_1_set_irq_funcs(struct amdgpu_device *adev);
74 static void gfx_v12_1_set_rlc_funcs(struct amdgpu_device *adev);
75 static void gfx_v12_1_set_mqd_funcs(struct amdgpu_device *adev);
76 static void gfx_v12_1_set_imu_funcs(struct amdgpu_device *adev);
77 static int gfx_v12_1_get_cu_info(struct amdgpu_device *adev,
78 				 struct amdgpu_cu_info *cu_info);
79 static uint64_t gfx_v12_1_get_gpu_clock_counter(struct amdgpu_device *adev);
80 static void gfx_v12_1_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
81 				       u32 sh_num, u32 instance, int xcc_id);
82 static void gfx_v12_1_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
83 				     uint32_t val);
84 static int gfx_v12_1_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
85 static void gfx_v12_1_ring_invalidate_tlbs(struct amdgpu_ring *ring,
86 					   uint16_t pasid, uint32_t flush_type,
87 					   bool all_hub, uint8_t dst_sel);
88 static void gfx_v12_1_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
89 static void gfx_v12_1_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
90 static void gfx_v12_1_update_perf_clk(struct amdgpu_device *adev,
91 				      bool enable);
92 static void gfx_v12_1_xcc_update_perf_clk(struct amdgpu_device *adev,
93 					 bool enable, int xcc_id);
94 static int gfx_v12_1_init_cp_compute_microcode_bo(struct amdgpu_device *adev);
95 
96 static void gfx_v12_1_kiq_set_resources(struct amdgpu_ring *kiq_ring,
97 					uint64_t queue_mask)
98 {
99 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
100 	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
101 			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
102 	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
103 	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
104 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
105 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
106 	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
107 	amdgpu_ring_write(kiq_ring, 0);
108 }
109 
110 static void gfx_v12_1_kiq_map_queues(struct amdgpu_ring *kiq_ring,
111 				     struct amdgpu_ring *ring)
112 {
113 	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
114 	uint64_t wptr_addr = ring->wptr_gpu_addr;
115 	uint32_t me = 0, eng_sel = 0;
116 
117 	switch (ring->funcs->type) {
118 	case AMDGPU_RING_TYPE_COMPUTE:
119 		me = 1;
120 		eng_sel = 0;
121 		break;
122 	case AMDGPU_RING_TYPE_MES:
123 		me = 2;
124 		eng_sel = 5;
125 		break;
126 	default:
127 		WARN_ON(1);
128 	}
129 
130 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
131 	/* Q_sel:0, vmid:0, vidmem:1, engine:0, num_Q:1 */
132 	amdgpu_ring_write(kiq_ring,
133 			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
134 			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
135 			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
136 			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
137 			  PACKET3_MAP_QUEUES_ME((me)) |
138 			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
139 			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
140 			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
141 			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
142 	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
143 	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
144 	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
145 	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
146 	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
147 }
148 
149 static void gfx_v12_1_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
150 				       struct amdgpu_ring *ring,
151 				       enum amdgpu_unmap_queues_action action,
152 				       u64 gpu_addr, u64 seq)
153 {
154 	struct amdgpu_device *adev = kiq_ring->adev;
155 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
156 
157 	if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
158 		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr,
159 					      seq, kiq_ring->xcc_id);
160 		return;
161 	}
162 
163 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
164 	amdgpu_ring_write(kiq_ring, /* action, queue_sel, eng_sel, num_queues */
165 			  PACKET3_UNMAP_QUEUES_ACTION(action) |
166 			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
167 			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
168 			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
169 	amdgpu_ring_write(kiq_ring,
170 		  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
171 
172 	if (action == PREEMPT_QUEUES_NO_UNMAP) {
173 		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
174 		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
175 		amdgpu_ring_write(kiq_ring, seq);
176 	} else {
177 		amdgpu_ring_write(kiq_ring, 0);
178 		amdgpu_ring_write(kiq_ring, 0);
179 		amdgpu_ring_write(kiq_ring, 0);
180 	}
181 }
182 
183 static void gfx_v12_1_kiq_query_status(struct amdgpu_ring *kiq_ring,
184 				       struct amdgpu_ring *ring,
185 				       u64 addr, u64 seq)
186 {
187 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
188 
189 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
190 	amdgpu_ring_write(kiq_ring,
191 			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
192 			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
193 			  PACKET3_QUERY_STATUS_COMMAND(2));
194 	amdgpu_ring_write(kiq_ring, /* doorbell_offset, eng_sel */
195 			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
196 			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
197 	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
198 	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
199 	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
200 	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
201 }
202 
203 static void gfx_v12_1_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
204 					  uint16_t pasid,
205 					  uint32_t flush_type,
206 					  bool all_hub)
207 {
208 	gfx_v12_1_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
209 }
210 
211 static const struct kiq_pm4_funcs gfx_v12_1_kiq_pm4_funcs = {
212 	.kiq_set_resources = gfx_v12_1_kiq_set_resources,
213 	.kiq_map_queues = gfx_v12_1_kiq_map_queues,
214 	.kiq_unmap_queues = gfx_v12_1_kiq_unmap_queues,
215 	.kiq_query_status = gfx_v12_1_kiq_query_status,
216 	.kiq_invalidate_tlbs = gfx_v12_1_kiq_invalidate_tlbs,
217 	.set_resources_size = 8,
218 	.map_queues_size = 7,
219 	.unmap_queues_size = 6,
220 	.query_status_size = 7,
221 	.invalidate_tlbs_size = 2,
222 };
223 
224 static void gfx_v12_1_set_kiq_pm4_funcs(struct amdgpu_device *adev)
225 {
226 	int i, num_xcc;
227 
228 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
229 	for (i = 0; i < num_xcc; i++)
230 		adev->gfx.kiq[i].pmf = &gfx_v12_1_kiq_pm4_funcs;
231 }
232 
233 static void gfx_v12_1_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
234 				   int mem_space, int opt, uint32_t addr0,
235 				   uint32_t addr1, uint32_t ref,
236 				   uint32_t mask, uint32_t inv)
237 {
238 	if (mem_space == 0) {
239 		addr0 = soc_v1_0_normalize_xcc_reg_offset(addr0);
240 		addr1 = soc_v1_0_normalize_xcc_reg_offset(addr1);
241 	}
242 
243 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
244 	amdgpu_ring_write(ring,
245 			  /* memory (1) or register (0) */
246 			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
247 			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
248 			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
249 			   WAIT_REG_MEM_ENGINE(eng_sel)));
250 
251 	if (mem_space)
252 		BUG_ON(addr0 & 0x3); /* Dword align */
253 	amdgpu_ring_write(ring, addr0);
254 	amdgpu_ring_write(ring, addr1);
255 	amdgpu_ring_write(ring, ref);
256 	amdgpu_ring_write(ring, mask);
257 	amdgpu_ring_write(ring, inv); /* poll interval */
258 }
259 
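/*
 * Basic ring sanity test: seed SCRATCH_REG0 with 0xCAFEDEAD, submit a packet
 * that rewrites it to 0xDEADBEEF through the ring, then poll the register
 * until the new value shows up or the timeout expires.
 */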
260 static int gfx_v12_1_ring_test_ring(struct amdgpu_ring *ring)
261 {
262 	struct amdgpu_device *adev = ring->adev;
263 	uint32_t scratch_reg0_offset, xcc_offset;
264 	uint32_t tmp = 0;
265 	unsigned i;
266 	int r;
267 
268 	/* Use register offset which is local to XCC in the packet */
269 	xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
270 	scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
271 	WREG32(scratch_reg0_offset, 0xCAFEDEAD);
272 	tmp = RREG32(scratch_reg0_offset);
273 
274 	r = amdgpu_ring_alloc(ring, 5);
275 	if (r) {
276 		dev_err(adev->dev,
277 			"amdgpu: cp failed to lock ring %d (%d).\n",
278 			ring->idx, r);
279 		return r;
280 	}
281 
282 	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
283 		gfx_v12_1_ring_emit_wreg(ring, xcc_offset, 0xDEADBEEF);
284 	} else {
285 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
286 		amdgpu_ring_write(ring, xcc_offset -
287 				  PACKET3_SET_UCONFIG_REG_START);
288 		amdgpu_ring_write(ring, 0xDEADBEEF);
289 	}
290 	amdgpu_ring_commit(ring);
291 
292 	for (i = 0; i < adev->usec_timeout; i++) {
293 		tmp = RREG32(scratch_reg0_offset);
294 		if (tmp == 0xDEADBEEF)
295 			break;
296 		if (amdgpu_emu_mode == 1)
297 			msleep(1);
298 		else
299 			udelay(1);
300 	}
301 
302 	if (i >= adev->usec_timeout)
303 		r = -ETIMEDOUT;
304 	return r;
305 }
306 
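/*
 * IB sanity test: build a small indirect buffer that writes 0xDEADBEEF into a
 * writeback slot via WRITE_DATA, schedule it on the ring and verify the value
 * once the fence signals.
 */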
307 static int gfx_v12_1_ring_test_ib(struct amdgpu_ring *ring, long timeout)
308 {
309 	struct amdgpu_device *adev = ring->adev;
310 	struct amdgpu_ib ib;
311 	struct dma_fence *f = NULL;
312 	unsigned index;
313 	uint64_t gpu_addr;
314 	volatile uint32_t *cpu_ptr;
315 	long r;
316 
317 	/* MES KIQ fw doesn't support indirect buffers for now */
318 	if (adev->enable_mes_kiq &&
319 	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
320 		return 0;
321 
322 	memset(&ib, 0, sizeof(ib));
323 
324 	r = amdgpu_device_wb_get(adev, &index);
325 	if (r)
326 		return r;
327 
328 	gpu_addr = adev->wb.gpu_addr + (index * 4);
329 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
330 	cpu_ptr = &adev->wb.wb[index];
331 
332 	r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
333 	if (r) {
334 		dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r);
335 		goto err1;
336 	}
337 
338 	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
339 	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
340 	ib.ptr[2] = lower_32_bits(gpu_addr);
341 	ib.ptr[3] = upper_32_bits(gpu_addr);
342 	ib.ptr[4] = 0xDEADBEEF;
343 	ib.length_dw = 5;
344 
345 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
346 	if (r)
347 		goto err2;
348 
349 	r = dma_fence_wait_timeout(f, false, timeout);
350 	if (r == 0) {
351 		r = -ETIMEDOUT;
352 		goto err2;
353 	} else if (r < 0) {
354 		goto err2;
355 	}
356 
357 	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
358 		r = 0;
359 	else
360 		r = -EINVAL;
361 err2:
362 	amdgpu_ib_free(&ib, NULL);
363 	dma_fence_put(f);
364 err1:
365 	amdgpu_device_wb_free(adev, index);
366 	return r;
367 }
368 
369 static void gfx_v12_1_free_microcode(struct amdgpu_device *adev)
370 {
371 	amdgpu_ucode_release(&adev->gfx.rlc_fw);
372 	amdgpu_ucode_release(&adev->gfx.mec_fw);
373 
374 	kfree(adev->gfx.rlc.register_list_format);
375 }
376 
377 static int gfx_v12_1_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
378 {
379 	const struct psp_firmware_header_v1_0 *toc_hdr;
380 	int err = 0;
381 
382 	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
383 				   AMDGPU_UCODE_REQUIRED,
384 				   "amdgpu/%s_toc.bin", ucode_prefix);
385 	if (err)
386 		goto out;
387 
388 	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
389 	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
390 	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
391 	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
392 	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
393 			le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
394 	return 0;
395 out:
396 	amdgpu_ucode_release(&adev->psp.toc_fw);
397 	return err;
398 }
399 
400 static int gfx_v12_1_init_microcode(struct amdgpu_device *adev)
401 {
402 	char ucode_prefix[15];
403 	int err;
404 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
405 	uint16_t version_major;
406 	uint16_t version_minor;
407 
408 	DRM_DEBUG("\n");
409 
410 	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
411 
412 	if (!amdgpu_sriov_vf(adev)) {
413 		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
414 					   AMDGPU_UCODE_REQUIRED,
415 					   "amdgpu/%s_rlc.bin", ucode_prefix);
416 		if (err)
417 			goto out;
418 		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
419 		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
420 		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
421 		err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
422 		if (err)
423 			goto out;
424 	}
425 
426 	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
427 				   AMDGPU_UCODE_REQUIRED,
428 				   "amdgpu/%s_mec.bin", ucode_prefix);
429 	if (err)
430 		goto out;
431 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
432 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
433 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
434 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK);
435 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK);
436 
437 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
438 		err = gfx_v12_1_init_toc_microcode(adev, ucode_prefix);
439 
440 	/* only one MEC for gfx 12 */
441 	adev->gfx.mec2_fw = NULL;
442 
443 	if (adev->gfx.imu.funcs) {
444 		if (adev->gfx.imu.funcs->init_microcode) {
445 			err = adev->gfx.imu.funcs->init_microcode(adev);
446 			if (err)
447 				dev_err(adev->dev, "Failed to load imu firmware!\n");
448 		}
449 	}
450 
451 out:
452 	if (err) {
453 		amdgpu_ucode_release(&adev->gfx.rlc_fw);
454 		amdgpu_ucode_release(&adev->gfx.mec_fw);
455 	}
456 
457 	return err;
458 }
459 
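/*
 * Clear-state buffer size in dwords: one leading dword (the cluster count)
 * plus, for each SECT_CONTEXT extent, two dwords for the register count and
 * index followed by the register values themselves.
 */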
460 static u32 gfx_v12_1_get_csb_size(struct amdgpu_device *adev)
461 {
462 	u32 count = 0;
463 	const struct cs_section_def *sect = NULL;
464 	const struct cs_extent_def *ext = NULL;
465 
466 	count += 1;
467 
468 	for (sect = gfx12_cs_data; sect->section != NULL; ++sect) {
469 		if (sect->id == SECT_CONTEXT) {
470 			for (ext = sect->section; ext->extent != NULL; ++ext)
471 				count += 2 + ext->reg_count;
472 		} else
473 			return 0;
474 	}
475 
476 	return count;
477 }
478 
479 static void gfx_v12_1_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
480 {
481 	u32 count = 0, clustercount = 0, i;
482 	const struct cs_section_def *sect = NULL;
483 	const struct cs_extent_def *ext = NULL;
484 
485 	if (adev->gfx.rlc.cs_data == NULL)
486 		return;
487 	if (buffer == NULL)
488 		return;
489 
490 	count += 1;
491 
492 	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
493 		if (sect->id == SECT_CONTEXT) {
494 			for (ext = sect->section; ext->extent != NULL; ++ext) {
495 				clustercount++;
496 				buffer[count++] = ext->reg_count;
497 				buffer[count++] = ext->reg_index;
498 
499 				for (i = 0; i < ext->reg_count; i++)
500 					buffer[count++] = cpu_to_le32(ext->extent[i]);
501 			}
502 		} else
503 			return;
504 	}
505 
506 	buffer[0] = clustercount;
507 }
508 
509 static void gfx_v12_1_rlc_fini(struct amdgpu_device *adev)
510 {
511 	/* clear state block */
512 	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
513 			&adev->gfx.rlc.clear_state_gpu_addr,
514 			(void **)&adev->gfx.rlc.cs_ptr);
515 
516 	/* jump table block */
517 	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
518 			&adev->gfx.rlc.cp_table_gpu_addr,
519 			(void **)&adev->gfx.rlc.cp_table_ptr);
520 }
521 
522 static void gfx_v12_1_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
523 {
524 	int xcc_id, num_xcc;
525 	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
526 
527 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
528 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
529 		reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
530 
531 		reg_access_ctrl->grbm_cntl =
532 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
533 		reg_access_ctrl->grbm_idx =
534 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
535 
536 		reg_access_ctrl->vfi_cmd =
537 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_VFI_CMD);
538 		reg_access_ctrl->vfi_stat =
539 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_VFI_STAT);
540 		reg_access_ctrl->vfi_addr =
541 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_VFI_ADDR);
542 		reg_access_ctrl->vfi_data =
543 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_VFI_DATA);
544 		reg_access_ctrl->vfi_grbm_cntl =
545 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_VFI_GRBM_GFX_CNTL);
546 		reg_access_ctrl->vfi_grbm_idx =
547 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_VFI_GRBM_GFX_INDEX);
548 		reg_access_ctrl->vfi_grbm_cntl_data = 0;
549 		reg_access_ctrl->vfi_grbm_idx_data = 0;
550 	}
551 	adev->gfx.rlc.rlcg_reg_access_supported = true;
552 }
553 
554 static int gfx_v12_1_rlc_init(struct amdgpu_device *adev)
555 {
556 	const struct cs_section_def *cs_data;
557 	int r, i, num_xcc;
558 
559 	adev->gfx.rlc.cs_data = gfx12_cs_data;
560 
561 	cs_data = adev->gfx.rlc.cs_data;
562 
563 	if (cs_data) {
564 		/* init clear state block */
565 		r = amdgpu_gfx_rlc_init_csb(adev);
566 		if (r)
567 			return r;
568 	}
569 
570 	/* init spm vmid with 0xf */
571 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
572 	for (i = 0; i < num_xcc; i++) {
573 		if (adev->gfx.rlc.funcs->update_spm_vmid)
574 			adev->gfx.rlc.funcs->update_spm_vmid(adev, i, NULL, 0xf);
575 	}
576 
577 	return 0;
578 }
579 
580 static void gfx_v12_1_mec_fini(struct amdgpu_device *adev)
581 {
582 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
583 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
584 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
585 }
586 
587 static int gfx_v12_1_mec_init(struct amdgpu_device *adev)
588 {
589 	int r, i, num_xcc;
590 	u32 *hpd;
591 	size_t mec_hpd_size;
592 
593 	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
594 
595 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
596 	for (i = 0; i < num_xcc; i++)
597 		bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
598 			    AMDGPU_MAX_COMPUTE_QUEUES);
599 
600 	/* take ownership of the relevant compute queues */
601 	amdgpu_gfx_compute_queue_acquire(adev);
602 	mec_hpd_size = adev->gfx.num_compute_rings *
603 		       GFX12_MEC_HPD_SIZE * num_xcc;
604 
605 	if (mec_hpd_size) {
606 		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
607 					      AMDGPU_GEM_DOMAIN_GTT,
608 					      &adev->gfx.mec.hpd_eop_obj,
609 					      &adev->gfx.mec.hpd_eop_gpu_addr,
610 					      (void **)&hpd);
611 		if (r) {
612 			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
613 			gfx_v12_1_mec_fini(adev);
614 			return r;
615 		}
616 
617 		memset(hpd, 0, mec_hpd_size);
618 
619 		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
620 		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
621 	}
622 
623 	return 0;
624 }
625 
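/*
 * The two helpers below read per-wave state through the SQ_IND_INDEX /
 * SQ_IND_DATA indirect register pair of the selected XCC.
 */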
626 static uint32_t wave_read_ind(struct amdgpu_device *adev,
627 			      uint32_t xcc_id, uint32_t wave,
628 			      uint32_t address)
629 {
630 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
631 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
632 		(address << SQ_IND_INDEX__INDEX__SHIFT));
633 	return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
634 }
635 
636 static void wave_read_regs(struct amdgpu_device *adev,
637 			   uint32_t xcc_id, uint32_t wave,
638 			   uint32_t thread, uint32_t regno,
639 			   uint32_t num, uint32_t *out)
640 {
641 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
642 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
643 		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
644 		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
645 		(SQ_IND_INDEX__AUTO_INCR_MASK));
646 	while (num--)
647 		*(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
648 }
649 
650 static void gfx_v12_1_read_wave_data(struct amdgpu_device *adev,
651 				     uint32_t xcc_id,
652 				     uint32_t simd, uint32_t wave,
653 				     uint32_t *dst, int *no_fields)
654 {
655 	/* in gfx12 the SIMD_ID is specified as part of the INSTANCE
656 	 * field when performing a select_se_sh so it should be
657 	 * zero here */
658 	WARN_ON(simd != 0);
659 
660 	/* type 4 wave data */
661 	dst[(*no_fields)++] = 4;
662 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_STATUS);
663 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_PC_LO);
664 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_PC_HI);
665 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXEC_LO);
666 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXEC_HI);
667 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_HW_ID1);
668 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_HW_ID2);
669 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_GPR_ALLOC);
670 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_LDS_ALLOC);
671 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_IB_STS);
672 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_IB_STS2);
673 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_IB_DBG1);
674 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_M0);
675 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_MODE);
676 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_STATE_PRIV);
677 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXCP_FLAG_PRIV);
678 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXCP_FLAG_USER);
679 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_TRAP_CTRL);
680 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_ACTIVE);
681 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_VALID_AND_IDLE);
682 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_DVGPR_ALLOC_LO);
683 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_DVGPR_ALLOC_HI);
684 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_SCHED_MODE);
685 }
686 
687 static void gfx_v12_1_read_wave_sgprs(struct amdgpu_device *adev,
688 				      uint32_t xcc_id, uint32_t simd,
689 				      uint32_t wave, uint32_t start,
690 				      uint32_t size, uint32_t *dst)
691 {
692 	WARN_ON(simd != 0);
693 
694 	wave_read_regs(adev, xcc_id, wave, 0,
695 		       start + SQIND_WAVE_SGPRS_OFFSET,
696 		       size, dst);
697 }
698 
699 static void gfx_v12_1_read_wave_vgprs(struct amdgpu_device *adev,
700 				      uint32_t xcc_id, uint32_t simd,
701 				      uint32_t wave, uint32_t thread,
702 				      uint32_t start, uint32_t size,
703 				      uint32_t *dst)
704 {
705 	wave_read_regs(adev, xcc_id, wave, thread,
706 		       start + SQIND_WAVE_VGPRS_OFFSET,
707 		       size, dst);
708 }
709 
710 static void gfx_v12_1_select_me_pipe_q(struct amdgpu_device *adev,
711 				       u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
712 {
713 	soc_v1_0_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
714 }
715 
716 static int gfx_v12_1_get_xccs_per_xcp(struct amdgpu_device *adev)
717 {
718 	/* Fill this in when the interface is ready */
719 	return 1;
720 }
721 
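/*
 * Convert an IH node id into the logical XCC index: derive the physical XCC
 * instance from the node id, then look it up in the logical-to-physical
 * instance mapping.
 */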
722 static int gfx_v12_1_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
723 {
724 	int logic_xcc;
725 	int xcc = (ih_node & 0x7) - 2 + (ih_node >> 3) * 4;
726 
727 	for (logic_xcc = 0; logic_xcc < NUM_XCC(adev->gfx.xcc_mask); logic_xcc++) {
728 		if (xcc == GET_INST(GC, logic_xcc))
729 			return logic_xcc;
730 	}
731 
732 	dev_err(adev->dev, "Couldn't find xcc mapping from IH node\n");
733 	return -EINVAL;
734 }
735 
736 static const struct amdgpu_gfx_funcs gfx_v12_1_gfx_funcs = {
737 	.get_gpu_clock_counter = &gfx_v12_1_get_gpu_clock_counter,
738 	.select_se_sh = &gfx_v12_1_xcc_select_se_sh,
739 	.read_wave_data = &gfx_v12_1_read_wave_data,
740 	.read_wave_sgprs = &gfx_v12_1_read_wave_sgprs,
741 	.read_wave_vgprs = &gfx_v12_1_read_wave_vgprs,
742 	.select_me_pipe_q = &gfx_v12_1_select_me_pipe_q,
743 	.update_perfmon_mgcg = &gfx_v12_1_update_perf_clk,
744 	.get_xccs_per_xcp = &gfx_v12_1_get_xccs_per_xcp,
745 	.ih_node_to_logical_xcc = &gfx_v12_1_ih_to_xcc_inst,
746 };
747 
748 static int gfx_v12_1_gpu_early_init(struct amdgpu_device *adev)
749 {
750 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
751 	case IP_VERSION(12, 1, 0):
752 		adev->gfx.config.max_hw_contexts = 8;
753 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
754 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
755 		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
756 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
757 		break;
758 	default:
759 		BUG();
760 		break;
761 	}
762 
763 	return 0;
764 }
765 
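/*
 * Set up a single compute ring: pick its doorbell within the XCC's doorbell
 * range, carve out its slice of the EOP buffer, and register it with the EOP
 * interrupt source that matches its MEC and pipe.
 */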
766 static int gfx_v12_1_compute_ring_init(struct amdgpu_device *adev, int ring_id,
767 				       int xcc_id, int mec, int pipe, int queue)
768 {
769 	int r;
770 	unsigned irq_type;
771 	struct amdgpu_ring *ring;
772 	unsigned int hw_prio;
773 	uint32_t xcc_doorbell_start;
774 
775 	ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
776 				       ring_id];
777 
778 	/* mec0 is me1 */
779 	ring->xcc_id = xcc_id;
780 	ring->me = mec + 1;
781 	ring->pipe = pipe;
782 	ring->queue = queue;
783 
784 	ring->ring_obj = NULL;
785 	ring->use_doorbell = true;
786 	xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
787 			     xcc_id * adev->doorbell_index.xcc_doorbell_range;
788 	ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
789 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
790 			     (ring_id + xcc_id * adev->gfx.num_compute_rings) *
791 			     GFX12_MEC_HPD_SIZE;
792 	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
793 	sprintf(ring->name, "comp_%d.%d.%d.%d",
794 			ring->xcc_id, ring->me, ring->pipe, ring->queue);
795 
796 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
797 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
798 		+ ring->pipe;
799 	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
800 			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
801 	/* type-2 packets are deprecated on MEC, use type-3 instead */
802 	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
803 			     hw_prio, NULL);
804 	if (r)
805 		return r;
806 
807 	return 0;
808 }
809 
810 static struct {
811 	SOC24_FIRMWARE_ID	id;
812 	unsigned int		offset;
813 	unsigned int		size;
814 	unsigned int		size_x16;
815 	unsigned int		num_inst;
816 } rlc_autoload_info[SOC24_FIRMWARE_ID_MAX];
817 
818 #define RLC_TOC_OFFSET_DWUNIT   8
819 #define RLC_SIZE_MULTIPLE       1024
820 #define RLC_TOC_UMF_SIZE_inM	23ULL
821 #define RLC_TOC_FORMAT_API	165ULL
822 
823 #define RLC_NUM_INS_CODE0   1
824 #define RLC_NUM_INS_CODE1   8
825 #define RLC_NUM_INS_CODE2   2
826 #define RLC_NUM_INS_CODE3   16
827 
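/*
 * Walk the RLC table of contents and cache each firmware id's offset, size
 * and instance count in rlc_autoload_info[] for the backdoor autoload path.
 */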
828 static void gfx_v12_1_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
829 {
830 	RLC_TABLE_OF_CONTENT_V2 *ucode = rlc_toc;
831 
832 	while (ucode && (ucode->id > SOC24_FIRMWARE_ID_INVALID)) {
833 		rlc_autoload_info[ucode->id].id = ucode->id;
834 		rlc_autoload_info[ucode->id].offset =
835 			ucode->offset * RLC_TOC_OFFSET_DWUNIT * 4;
836 		rlc_autoload_info[ucode->id].size =
837 			ucode->size_x16 ? ucode->size * RLC_SIZE_MULTIPLE * 4 :
838 					  ucode->size * 4;
839 		switch (ucode->vfflr_image_code) {
840 		case 0:
841 			rlc_autoload_info[ucode->id].num_inst =
842 				RLC_NUM_INS_CODE0;
843 			break;
844 		case 1:
845 			rlc_autoload_info[ucode->id].num_inst =
846 				RLC_NUM_INS_CODE1;
847 			break;
848 		case 2:
849 			rlc_autoload_info[ucode->id].num_inst =
850 				RLC_NUM_INS_CODE2;
851 			break;
852 		case 3:
853 			rlc_autoload_info[ucode->id].num_inst =
854 				RLC_NUM_INS_CODE3;
855 			break;
856 		default:
857 			dev_err(adev->dev,
858 				"Invalid Instance number detected\n");
859 			break;
860 		}
861 		ucode++;
862 	}
863 }
864 
865 static uint32_t gfx_v12_1_calc_toc_total_size(struct amdgpu_device *adev)
866 {
867 	uint32_t total_size = 0;
868 	SOC24_FIRMWARE_ID id;
869 
870 	gfx_v12_1_parse_rlc_toc(adev, adev->psp.toc.start_addr);
871 
872 	for (id = SOC24_FIRMWARE_ID_RLC_G_UCODE; id < SOC24_FIRMWARE_ID_MAX; id++)
873 		total_size += rlc_autoload_info[id].size;
874 
875 	/* In case the offset in rlc toc ucode is aligned */
876 	if (total_size < rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].offset)
877 		total_size = rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].offset +
878 			rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].size;
879 	if (total_size < (RLC_TOC_UMF_SIZE_inM << 20))
880 		total_size = RLC_TOC_UMF_SIZE_inM << 20;
881 
882 	return total_size;
883 }
884 
885 static int gfx_v12_1_rlc_autoload_buffer_init(struct amdgpu_device *adev)
886 {
887 	int r;
888 	uint32_t total_size;
889 
890 	total_size = gfx_v12_1_calc_toc_total_size(adev);
891 
892 	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
893 				      AMDGPU_GEM_DOMAIN_VRAM,
894 				      &adev->gfx.rlc.rlc_autoload_bo,
895 				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
896 				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
897 
898 	if (r) {
899 		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
900 		return r;
901 	}
902 
903 	return 0;
904 }
905 
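/*
 * Copy one firmware image into the autoload buffer at the offset recorded in
 * the TOC, replicating it for each required instance and zero-padding any
 * leftover space in the per-instance slot.
 */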
906 static void gfx_v12_1_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
907 						       SOC24_FIRMWARE_ID id,
908 						       const void *fw_data,
909 						       uint32_t fw_size)
910 {
911 	uint32_t toc_offset;
912 	uint32_t toc_fw_size, toc_fw_inst_size;
913 	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;
914 	int i, num_inst;
915 
916 	if (id <= SOC24_FIRMWARE_ID_INVALID || id >= SOC24_FIRMWARE_ID_MAX)
917 		return;
918 
919 	toc_offset = rlc_autoload_info[id].offset;
920 	toc_fw_size = rlc_autoload_info[id].size;
921 	num_inst = rlc_autoload_info[id].num_inst;
922 	toc_fw_inst_size = toc_fw_size / num_inst;
923 
924 	if (fw_size == 0)
925 		fw_size = toc_fw_inst_size;
926 
927 	if (fw_size > toc_fw_inst_size)
928 		fw_size = toc_fw_inst_size;
929 
930 	for (i = 0; i < num_inst; i++) {
931 		if ((num_inst == RLC_NUM_INS_CODE0) ||
932 		    ((1 << (i / 2)) & adev->gfx.xcc_mask)) {
933 			memcpy(ptr + toc_offset + i * toc_fw_inst_size, fw_data, fw_size);
934 
935 			if (fw_size < toc_fw_inst_size)
936 				memset(ptr + toc_offset + fw_size + i * toc_fw_inst_size,
937 				       0, toc_fw_inst_size - fw_size);
938 		}
939 	}
940 }
941 
942 static void
943 gfx_v12_1_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev)
944 {
945 	void *data;
946 	uint32_t size;
947 	uint32_t *toc_ptr;
948 
949 	data = adev->psp.toc.start_addr;
950 	size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_TOC].size;
951 
952 	toc_ptr = (uint32_t *)data + size / 4 - 2;
953 	*toc_ptr = (RLC_TOC_FORMAT_API << 24) | 0x1;
954 
955 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_TOC,
956 						   data, size);
957 }
958 
959 static void
960 gfx_v12_1_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev)
961 {
962 	const __le32 *fw_data;
963 	uint32_t fw_size;
964 	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
965 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
966 	const struct rlc_firmware_header_v2_1 *rlcv21_hdr;
967 	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
968 	uint16_t version_major, version_minor;
969 
970 	/* mec ucode */
971 	cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
972 		adev->gfx.mec_fw->data;
973 	/* instruction */
974 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
975 		le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
976 	fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
977 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC,
978 						   fw_data, fw_size);
979 	/* data */
980 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
981 		le32_to_cpu(cpv2_hdr->data_offset_bytes));
982 	fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
983 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P0_STACK,
984 						   fw_data, fw_size);
985 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P1_STACK,
986 						   fw_data, fw_size);
987 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P2_STACK,
988 						   fw_data, fw_size);
989 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P3_STACK,
990 						   fw_data, fw_size);
991 
992 	/* rlc ucode */
993 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
994 		adev->gfx.rlc_fw->data;
995 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
996 			le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
997 	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
998 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_G_UCODE,
999 						   fw_data, fw_size);
1000 
1001 	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1002 	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1003 	if (version_major == 2) {
1004 		if (version_minor >= 1) {
1005 			rlcv21_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
1006 
1007 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1008 					le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_offset_bytes));
1009 			fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_size_bytes);
1010 			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLCG_SCRATCH,
1011 						   fw_data, fw_size);
1012 
1013 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1014 					le32_to_cpu(rlcv21_hdr->save_restore_list_srm_offset_bytes));
1015 			fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_srm_size_bytes);
1016 			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_SRM_ARAM,
1017 						   fw_data, fw_size);
1018 		}
1019 		if (version_minor >= 2) {
1020 			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
1021 
1022 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1023 					le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
1024 			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
1025 			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_UCODE,
1026 						   fw_data, fw_size);
1027 
1028 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1029 					le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
1030 			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
1031 			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_DRAM_BOOT,
1032 						   fw_data, fw_size);
1033 		}
1034 	}
1035 }
1036 
1037 static void
1038 gfx_v12_1_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev)
1039 {
1040 	const __le32 *fw_data;
1041 	uint32_t fw_size;
1042 	const struct sdma_firmware_header_v3_0 *sdma_hdr;
1043 
1044 	if (adev->sdma.instance[0].fw) {
1045 		sdma_hdr = (const struct sdma_firmware_header_v3_0 *)
1046 			adev->sdma.instance[0].fw->data;
1047 		fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
1048 				le32_to_cpu(sdma_hdr->ucode_offset_bytes));
1049 		fw_size = le32_to_cpu(sdma_hdr->ucode_size_bytes);
1050 
1051 		gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_SDMA_UCODE_TH0,
1052 							   fw_data, fw_size);
1053 	}
1054 }
1055 
1056 static void
1057 gfx_v12_1_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev)
1058 {
1059 	const __le32 *fw_data;
1060 	unsigned fw_size;
1061 	const struct mes_firmware_header_v1_0 *mes_hdr;
1062 	int pipe, ucode_id, data_id;
1063 
1064 	for (pipe = 0; pipe < 2; pipe++) {
1065 		if (pipe == 0) {
1066 			ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P0;
1067 			data_id  = SOC24_FIRMWARE_ID_RS64_MES_P0_STACK;
1068 		} else {
1069 			ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P1;
1070 			data_id  = SOC24_FIRMWARE_ID_RS64_MES_P1_STACK;
1071 		}
1072 
1073 		mes_hdr = (const struct mes_firmware_header_v1_0 *)
1074 			adev->mes.fw[pipe]->data;
1075 
1076 		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
1077 				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
1078 		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
1079 
1080 		gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, ucode_id, fw_data, fw_size);
1081 
1082 		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
1083 				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
1084 		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
1085 
1086 		gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, data_id, fw_data, fw_size);
1087 	}
1088 }
1089 
1090 static int gfx_v12_1_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
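/*
 * Stage all firmware images in the autoload buffer, point each XCC's IMU
 * bootloader registers at the RLC_G image, load the IMU firmware and un-halt
 * the RLC so it can carry out the backdoor autoload sequence.
 */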
1091 {
1092 	uint32_t rlc_g_offset, rlc_g_size;
1093 	uint64_t gpu_addr;
1094 	uint32_t data;
1095 	int i, num_xcc;
1096 
1097 	/* RLC autoload sequence 2: copy ucode */
1098 	gfx_v12_1_rlc_backdoor_autoload_copy_sdma_ucode(adev);
1099 	gfx_v12_1_rlc_backdoor_autoload_copy_gfx_ucode(adev);
1100 	gfx_v12_1_rlc_backdoor_autoload_copy_mes_ucode(adev);
1101 	gfx_v12_1_rlc_backdoor_autoload_copy_toc_ucode(adev);
1102 
1103 	rlc_g_offset = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].offset;
1104 	rlc_g_size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].size;
1105 	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset - adev->gmc.vram_start;
1106 
1107 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1108 	for (i = 0; i < num_xcc; i++) {
1109 		WREG32_SOC15(GC, GET_INST(GC, i),
1110 			     regGFX_IMU_RLC_BOOTLOADER_ADDR_HI,
1111 			     upper_32_bits(gpu_addr));
1112 		WREG32_SOC15(GC, GET_INST(GC, i),
1113 			     regGFX_IMU_RLC_BOOTLOADER_ADDR_LO,
1114 			     lower_32_bits(gpu_addr));
1115 		WREG32_SOC15(GC, GET_INST(GC, i),
1116 			     regGFX_IMU_RLC_BOOTLOADER_SIZE,
1117 			     rlc_g_size);
1118 	}
1119 
1120 	if (adev->gfx.imu.funcs) {
1121 		/* RLC autoload sequence 3: load IMU fw */
1122 		if (adev->gfx.imu.funcs->load_microcode)
1123 			adev->gfx.imu.funcs->load_microcode(adev);
1124 	}
1125 
1126 	/* unhalt rlc to start autoload */
1127 	for (i = 0; i < num_xcc; i++) {
1128 		data = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_THREAD_ENABLE);
1129 		data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD0_ENABLE, 1);
1130 		data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1);
1131 		WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_THREAD_ENABLE, data);
1132 		WREG32_SOC15(GC, GET_INST(GC, i), regRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
1133 	}
1134 
1135 	return 0;
1136 }
1137 
1138 static int gfx_v12_1_sw_init(struct amdgpu_ip_block *ip_block)
1139 {
1140 	int i, j, k, r, ring_id = 0;
1141 	unsigned num_compute_rings;
1142 	int xcc_id, num_xcc;
1143 	struct amdgpu_device *adev = ip_block->adev;
1144 
1145 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1146 	case IP_VERSION(12, 1, 0):
1147 		adev->gfx.mec.num_mec = 1;
1148 		adev->gfx.mec.num_pipe_per_mec = 4;
1149 		adev->gfx.mec.num_queue_per_pipe = 8;
1150 		break;
1151 	default:
1152 		adev->gfx.mec.num_mec = 2;
1153 		adev->gfx.mec.num_pipe_per_mec = 2;
1154 		adev->gfx.mec.num_queue_per_pipe = 4;
1155 		break;
1156 	}
1157 
1158 	/* recalculate compute rings to use based on hardware configuration */
1159 	num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
1160 			     adev->gfx.mec.num_queue_per_pipe) / 2;
1161 	adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
1162 					  num_compute_rings);
1163 
1164 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1165 
1166 	/* EOP Event */
1167 	r = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_GRBM_CP,
1168 			      GFX_12_1_0__SRCID__CP_EOP_INTERRUPT,
1169 			      &adev->gfx.eop_irq);
1170 	if (r)
1171 		return r;
1172 
1173 	/* Privileged reg */
1174 	r = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_GRBM_CP,
1175 			      GFX_12_1_0__SRCID__CP_PRIV_REG_FAULT,
1176 			      &adev->gfx.priv_reg_irq);
1177 	if (r)
1178 		return r;
1179 
1180 	/* Privileged inst */
1181 	r = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_GRBM_CP,
1182 			      GFX_12_1_0__SRCID__CP_PRIV_INSTR_FAULT,
1183 			      &adev->gfx.priv_inst_irq);
1184 	if (r)
1185 		return r;
1186 
1187 	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1188 
1189 	r = gfx_v12_1_rlc_init(adev);
1190 	if (r) {
1191 		dev_err(adev->dev, "Failed to init rlc BOs!\n");
1192 		return r;
1193 	}
1194 
1195 	r = gfx_v12_1_mec_init(adev);
1196 	if (r) {
1197 		dev_err(adev->dev, "Failed to init MEC BOs!\n");
1198 		return r;
1199 	}
1200 
1201 	/* set up the compute queues - allocate horizontally across pipes */
1202 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
1203 		ring_id = 0;
1204 		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1205 			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1206 				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
1207 					if (!amdgpu_gfx_is_mec_queue_enabled(adev,
1208 								xcc_id, i, k, j))
1209 						continue;
1210 
1211 					r = gfx_v12_1_compute_ring_init(adev, ring_id,
1212 								xcc_id, i, k, j);
1213 					if (r)
1214 						return r;
1215 
1216 					ring_id++;
1217 				}
1218 			}
1219 		}
1220 
1221 		if (!adev->enable_mes_kiq) {
1222 			r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, xcc_id);
1223 			if (r) {
1224 				dev_err(adev->dev, "Failed to init KIQ BOs!\n");
1225 				return r;
1226 			}
1227 
1228 			r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
1229 			if (r)
1230 				return r;
1231 		}
1232 
1233 		r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v12_1_compute_mqd), xcc_id);
1234 		if (r)
1235 			return r;
1236 	}
1237 
1238 	/* allocate visible FB for rlc auto-loading fw */
1239 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1240 		r = gfx_v12_1_rlc_autoload_buffer_init(adev);
1241 		if (r)
1242 			return r;
1243 	} else if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1244 		r = gfx_v12_1_init_cp_compute_microcode_bo(adev);
1245 		if (r)
1246 			return r;
1247 	}
1248 
1249 	r = gfx_v12_1_gpu_early_init(adev);
1250 	if (r)
1251 		return r;
1252 
1253 	r = amdgpu_gfx_sysfs_init(adev);
1254 	if (r)
1255 		return r;
1256 
1257 	return 0;
1258 }
1259 
1260 static void gfx_v12_1_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
1261 {
1262 	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
1263 			&adev->gfx.rlc.rlc_autoload_gpu_addr,
1264 			(void **)&adev->gfx.rlc.rlc_autoload_ptr);
1265 }
1266 
1267 static int gfx_v12_1_sw_fini(struct amdgpu_ip_block *ip_block)
1268 {
1269 	int i, num_xcc;
1270 	struct amdgpu_device *adev = ip_block->adev;
1271 
1272 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1273 	for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
1274 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1275 
1276 	for (i = 0; i < num_xcc; i++) {
1277 		amdgpu_gfx_mqd_sw_fini(adev, i);
1278 
1279 		if (!adev->enable_mes_kiq) {
1280 			amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
1281 			amdgpu_gfx_kiq_fini(adev, i);
1282 		}
1283 	}
1284 
1285 	gfx_v12_1_rlc_fini(adev);
1286 	gfx_v12_1_mec_fini(adev);
1287 
1288 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
1289 		gfx_v12_1_rlc_autoload_buffer_fini(adev);
1290 
1291 	gfx_v12_1_free_microcode(adev);
1292 	amdgpu_gfx_sysfs_fini(adev);
1293 
1294 	return 0;
1295 }
1296 
1297 static void gfx_v12_1_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
1298 				       u32 sh_num, u32 instance, int xcc_id)
1299 {
1300 	u32 data;
1301 
1302 	if (instance == 0xffffffff)
1303 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
1304 				     INSTANCE_BROADCAST_WRITES, 1);
1305 	else
1306 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
1307 				     instance);
1308 
1309 	if (se_num == 0xffffffff)
1310 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
1311 				     1);
1312 	else
1313 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
1314 
1315 	if (sh_num == 0xffffffff)
1316 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
1317 				     1);
1318 	else
1319 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);
1320 
1321 	WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
1322 }
1323 
1324 static u32 gfx_v12_1_get_sa_active_bitmap(struct amdgpu_device *adev,
1325 					  int xcc_id)
1326 {
1327 	u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;
1328 
1329 	gc_disabled_sa_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SA_UNIT_DISABLE);
1330 	gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
1331 					    CC_GC_SA_UNIT_DISABLE,
1332 					    SA_DISABLE);
1333 	gc_user_disabled_sa_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SA_UNIT_DISABLE);
1334 	gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
1335 						 GC_USER_SA_UNIT_DISABLE,
1336 						 SA_DISABLE);
1337 	sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
1338 					    adev->gfx.config.max_shader_engines);
1339 
1340 	return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
1341 }
1342 
1343 static u32 gfx_v12_1_get_rb_active_bitmap(struct amdgpu_device *adev,
1344 					  int xcc_id)
1345 {
1346 	u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
1347 	u32 rb_mask;
1348 
1349 	gc_disabled_rb_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
1350 					   regCC_RB_BACKEND_DISABLE);
1351 	gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
1352 					    CC_RB_BACKEND_DISABLE,
1353 					    BACKEND_DISABLE);
1354 	gc_user_disabled_rb_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
1355 						regGC_USER_RB_BACKEND_DISABLE);
1356 	gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
1357 						 GC_USER_RB_BACKEND_DISABLE,
1358 						 BACKEND_DISABLE);
1359 	rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
1360 					    adev->gfx.config.max_shader_engines);
1361 
1362 	return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
1363 }
1364 
1365 static void gfx_v12_1_setup_rb(struct amdgpu_device *adev)
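/*
 * Derive the active render backend mask from the SA and RB harvest registers
 * of each XCC and record the result in the gfx config.
 */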
1366 {
1367 	u32 rb_bitmap_width_per_sa;
1368 	u32 max_sa;
1369 	u32 active_sa_bitmap;
1370 	u32 global_active_rb_bitmap;
1371 	u32 active_rb_bitmap = 0;
1372 	u32 i;
1373 	int xcc_id;
1374 
1375 	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
1376 		/* query sa bitmap from SA_UNIT_DISABLE registers */
1377 		active_sa_bitmap = gfx_v12_1_get_sa_active_bitmap(adev, xcc_id);
1378 		/* query rb bitmap from RB_BACKEND_DISABLE registers */
1379 		global_active_rb_bitmap = gfx_v12_1_get_rb_active_bitmap(adev, xcc_id);
1380 
1381 		/* generate active rb bitmap according to active sa bitmap */
1382 		max_sa = adev->gfx.config.max_shader_engines *
1383 			 adev->gfx.config.max_sh_per_se;
1384 		rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
1385 					 adev->gfx.config.max_sh_per_se;
1386 		for (i = 0; i < max_sa; i++) {
1387 			if (active_sa_bitmap & (1 << i))
1388 				active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
1389 		}
1390 
1391 		active_rb_bitmap |= global_active_rb_bitmap;
1392 	}
1393 
1394 	adev->gfx.config.backend_enable_mask = active_rb_bitmap;
1395 	adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
1396 }
1397 
1398 static void gfx_v12_1_xcc_init_compute_vmid(struct amdgpu_device *adev,
1399 					    int xcc_id)
1400 {
1401 	int i;
1402 	uint32_t sh_mem_bases;
1403 	uint32_t data;
1404 
1405 	/*
1406 	 * Configure apertures:
1407 	 * LDS:         0x20000000'00000000 - 0x20000001'00000000 (4GB)
1408 	 * Scratch:     0x10000000'00000000 - 0x10000001'00000000 (4GB)
1409 	 */
1410 	sh_mem_bases = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1411 				     (adev->gmc.private_aperture_start >> 58));
1412 	sh_mem_bases = REG_SET_FIELD(sh_mem_bases, SH_MEM_BASES, SHARED_BASE,
1413 				     (adev->gmc.shared_aperture_start >> 48));
1414 
1415 	mutex_lock(&adev->srbm_mutex);
1416 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1417 		soc_v1_0_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
1418 		/* CP and shaders */
1419 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
1420 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);
1421 
1422 		/* Enable trap for each kfd vmid. */
1423 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
1424 		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
1425 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);
1426 
1427 		/* Disable VGPR deallocation instruction for each KFD vmid. */
1428 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_DEBUG);
1429 		data = REG_SET_FIELD(data, SQ_DEBUG, DISABLE_VGPR_DEALLOC, 1);
1430 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_DEBUG, data);
1431 	}
1432 	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1433 	mutex_unlock(&adev->srbm_mutex);
1434 }
1435 
1436 static void gfx_v12_1_tcp_harvest(struct amdgpu_device *adev)
1437 {
1438 	/* TODO: harvest feature to be added later. */
1439 }
1440 
1441 static void gfx_v12_1_get_tcc_info(struct amdgpu_device *adev)
1442 {
1443 }
1444 
1445 static void gfx_v12_1_xcc_constants_init(struct amdgpu_device *adev,
1446 					 int xcc_id)
1447 {
1448 	u32 tmp;
1449 	int i;
1450 
1451 	/* XXX SH_MEM regs */
1452 	/* where to put LDS, scratch, GPUVM in FSA64 space */
1453 	mutex_lock(&adev->srbm_mutex);
1454 	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
1455 		soc_v1_0_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
1456 		/* CP and shaders */
1457 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1458 			     regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
1459 		if (i != 0) {
1460 			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1461 				(adev->gmc.private_aperture_start >> 58));
1462 			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
1463 				(adev->gmc.shared_aperture_start >> 48));
1464 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, tmp);
1465 		}
1466 	}
1467 	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1468 
1469 	mutex_unlock(&adev->srbm_mutex);
1470 
1471 	gfx_v12_1_xcc_init_compute_vmid(adev, xcc_id);
1472 }
1473 
1474 static void gfx_v12_1_constants_init(struct amdgpu_device *adev)
1475 {
1476 	int i, num_xcc;
1477 
1478 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1479 
1480 	gfx_v12_1_setup_rb(adev);
1481 	gfx_v12_1_get_cu_info(adev, &adev->gfx.cu_info);
1482 	gfx_v12_1_get_tcc_info(adev);
1483 	adev->gfx.config.pa_sc_tile_steering_override = 0;
1484 
1485 	for (i = 0; i < num_xcc; i++)
1486 		gfx_v12_1_xcc_constants_init(adev, i);
1487 }
1488 
1489 static void gfx_v12_1_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1490 						    bool enable, int xcc_id)
1491 {
1492 	u32 tmp;
1493 
1494 	if (amdgpu_sriov_vf(adev))
1495 		return;
1496 
1497 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);
1498 
1499 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
1500 			    enable ? 1 : 0);
1501 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
1502 			    enable ? 1 : 0);
1503 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
1504 			    enable ? 1 : 0);
1505 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
1506 			    enable ? 1 : 0);
1507 
1508 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
1509 }
1510 
1511 static int gfx_v12_1_xcc_init_csb(struct amdgpu_device *adev,
1512 				  int xcc_id)
1513 {
1514 	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
1515 
1516 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CSIB_ADDR_HI,
1517 			adev->gfx.rlc.clear_state_gpu_addr >> 32);
1518 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CSIB_ADDR_LO,
1519 			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
1520 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1521 		     regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
1522 
1523 	return 0;
1524 }
1525 
1526 static void gfx_v12_1_xcc_rlc_stop(struct amdgpu_device *adev,
1527 				   int xcc_id)
1528 {
1529 	u32 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CNTL);
1530 
1531 	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
1532 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CNTL, tmp);
1533 }
1534 
1535 static void gfx_v12_1_rlc_stop(struct amdgpu_device *adev)
1536 {
1537 	int i, num_xcc;
1538 
1539 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1540 	for (i = 0; i < num_xcc; i++)
1541 		gfx_v12_1_xcc_rlc_stop(adev, i);
1542 }
1543 
1544 static void gfx_v12_1_xcc_rlc_reset(struct amdgpu_device *adev,
1545 				    int xcc_id)
1546 {
1547 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id),
1548 			      GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
1549 	udelay(50);
1550 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id),
1551 			      GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
1552 	udelay(50);
1553 }
1554 
1555 static void gfx_v12_1_rlc_reset(struct amdgpu_device *adev)
1556 {
1557 	int i, num_xcc;
1558 
1559 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1560 	for (i = 0; i < num_xcc; i++)
1561 		gfx_v12_1_xcc_rlc_reset(adev, i);
1562 }
1563 
1564 static void gfx_v12_1_xcc_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
1565 						 bool enable, int xcc_id)
1566 {
1567 	uint32_t rlc_pg_cntl;
1568 
1569 	rlc_pg_cntl = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_PG_CNTL);
1570 
1571 	if (!enable) {
1572 		/* RLC_PG_CNTL[23] = 0 (default)
1573 		 * RLC will wait for handshake acks with SMU
1574 		 * GFXOFF will be enabled
1575 		 * RLC_PG_CNTL[23] = 1
1576 		 * RLC will not issue any message to SMU
1577 		 * hence no handshake between SMU & RLC
1578 		 * GFXOFF will be disabled
1579 		 */
1580 		rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
1581 	} else
1582 		rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
1583 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_PG_CNTL, rlc_pg_cntl);
1584 }
1585 
1586 static void gfx_v12_1_xcc_rlc_start(struct amdgpu_device *adev,
1587 				    int xcc_id)
1588 {
1589 	/* TODO: enable the rlc & smu handshake once the smu
1590 	 * and gfxoff features work as expected */
1591 	if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
1592 		gfx_v12_1_xcc_rlc_smu_handshake_cntl(adev, false, xcc_id);
1593 
1594 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL, RLC_ENABLE_F32, 1);
1595 	udelay(50);
1596 }
1597 
1598 static void gfx_v12_1_rlc_start(struct amdgpu_device *adev)
1599 {
1600 	int i, num_xcc;
1601 
1602 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1603 	for (i = 0; i < num_xcc; i++)
1604 		gfx_v12_1_xcc_rlc_start(adev, i);
1606 }
1607 
1608 static void gfx_v12_1_xcc_rlc_enable_srm(struct amdgpu_device *adev,
1609 					 int xcc_id)
1610 {
1611 	uint32_t tmp;
1612 
1613 	/* enable Save Restore Machine */
1614 	tmp = RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SRM_CNTL));
1615 	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1616 	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
1617 	WREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SRM_CNTL), tmp);
1618 }
1619 
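/* Load the RLCG ucode into RLC GPM instruction memory word by word (direct loading path) */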
1620 static void gfx_v12_1_xcc_load_rlcg_microcode(struct amdgpu_device *adev,
1621 					      int xcc_id)
1622 {
1623 	const struct rlc_firmware_header_v2_0 *hdr;
1624 	const __le32 *fw_data;
1625 	unsigned i, fw_size;
1626 
1627 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1628 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1629 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1630 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1631 
1632 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
1633 		     RLCG_UCODE_LOADING_START_ADDRESS);
1634 
1635 	for (i = 0; i < fw_size; i++)
1636 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1637 			     regRLC_GPM_UCODE_DATA,
1638 			     le32_to_cpup(fw_data++));
1639 
1640 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1641 		     regRLC_GPM_UCODE_ADDR,
1642 		     adev->gfx.rlc_fw_version);
1643 }
1644 
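/* Load the RLC LX6 IRAM and DRAM images, then release the LX6 core from reset */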
1645 static void gfx_v12_1_xcc_load_rlc_iram_dram_microcode(struct amdgpu_device *adev,
1646 						       int xcc_id)
1647 {
1648 	const struct rlc_firmware_header_v2_2 *hdr;
1649 	const __le32 *fw_data;
1650 	unsigned i, fw_size;
1651 	u32 tmp;
1652 
1653 	hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
1654 
1655 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1656 			le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes));
1657 	fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4;
1658 
1659 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_LX6_IRAM_ADDR, 0);
1660 
1661 	for (i = 0; i < fw_size; i++) {
1662 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
1663 			msleep(1);
1664 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1665 			     regRLC_LX6_IRAM_DATA,
1666 			     le32_to_cpup(fw_data++));
1667 	}
1668 
1669 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1670 		     regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
1671 
1672 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1673 			le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes));
1674 	fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4;
1675 
1676 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1677 		     regRLC_LX6_DRAM_ADDR, 0);
1678 	for (i = 0; i < fw_size; i++) {
1679 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
1680 			msleep(1);
1681 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1682 			     regRLC_LX6_DRAM_DATA,
1683 			     le32_to_cpup(fw_data++));
1684 	}
1685 
1686 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1687 		     regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
1688 
1689 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_LX6_CNTL);
1690 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1);
1691 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0);
1692 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_LX6_CNTL, tmp);
1693 }
1694 
1695 static int gfx_v12_1_xcc_rlc_load_microcode(struct amdgpu_device *adev,
1696 					    int xcc_id)
1697 {
1698 	const struct rlc_firmware_header_v2_0 *hdr;
1699 	uint16_t version_major;
1700 	uint16_t version_minor;
1701 
1702 	if (!adev->gfx.rlc_fw)
1703 		return -EINVAL;
1704 
1705 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1706 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
1707 
1708 	version_major = le16_to_cpu(hdr->header.header_version_major);
1709 	version_minor = le16_to_cpu(hdr->header.header_version_minor);
1710 
1711 	if (version_major == 2) {
1712 		gfx_v12_1_xcc_load_rlcg_microcode(adev, xcc_id);
1713 		if (amdgpu_dpm == 1) {
1714 			if (version_minor >= 2)
1715 				gfx_v12_1_xcc_load_rlc_iram_dram_microcode(adev, xcc_id);
1716 		}
1717 
1718 		return 0;
1719 	}
1720 
1721 	return -EINVAL;
1722 }
1723 
1724 static int gfx_v12_1_xcc_rlc_resume(struct amdgpu_device *adev,
1725 				    int xcc_id)
1726 {
1727 	int r;
1728 
1729 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1730 		gfx_v12_1_xcc_init_csb(adev, xcc_id);
1731 
1732 		if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
1733 			gfx_v12_1_xcc_rlc_enable_srm(adev, xcc_id);
1734 	} else {
1735 		if (amdgpu_sriov_vf(adev)) {
1736 			gfx_v12_1_xcc_init_csb(adev, xcc_id);
1737 			return 0;
1738 		}
1739 
1740 		gfx_v12_1_xcc_rlc_stop(adev, xcc_id);
1741 
1742 		/* disable CG */
1743 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
1744 
1745 		/* disable PG */
1746 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_PG_CNTL, 0);
1747 
1748 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1749 			/* legacy rlc firmware loading */
1750 			r = gfx_v12_1_xcc_rlc_load_microcode(adev, xcc_id);
1751 			if (r)
1752 				return r;
1753 		}
1754 
1755 		gfx_v12_1_xcc_init_csb(adev, xcc_id);
1756 
1757 		gfx_v12_1_xcc_rlc_start(adev, xcc_id);
1758 	}
1759 
1760 	return 0;
1761 }
1762 
1763 static int gfx_v12_1_rlc_resume(struct amdgpu_device *adev)
1764 {
1765 	int r, i, num_xcc;
1766 
1767 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1768 	for (i = 0; i < num_xcc; i++) {
1769 		r = gfx_v12_1_xcc_rlc_resume(adev, i);
1770 		if (r)
1771 			return r;
1772 	}
1773 
1774 	return 0;
1775 }
1776 
1777 static void gfx_v12_1_xcc_config_gfx_rs64(struct amdgpu_device *adev,
1778 					  int xcc_id)
1779 {
1780 	const struct gfx_firmware_header_v2_0 *mec_hdr;
1781 	uint32_t pipe_id, tmp;
1782 
1783 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)
1784 		adev->gfx.mec_fw->data;
1785 
1786 	/* config mec program start addr */
1787 	for (pipe_id = 0; pipe_id < 4; pipe_id++) {
1788 		soc_v1_0_grbm_select(adev, 1, pipe_id, 0, 0, GET_INST(GC, xcc_id));
1789 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START,
1790 					mec_hdr->ucode_start_addr_lo >> 2 |
1791 					mec_hdr->ucode_start_addr_hi << 30);
1792 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START_HI,
1793 					mec_hdr->ucode_start_addr_hi >> 2);
1794 	}
1795 	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1796 
1797 	/* reset mec pipe */
1798 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL);
1799 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
1800 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
1801 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
1802 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
1803 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL, tmp);
1804 
1805 	/* clear mec pipe reset */
1806 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
1807 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
1808 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
1809 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
1810 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL, tmp);
1811 }
1812 
1813 static void gfx_v12_1_config_gfx_rs64(struct amdgpu_device *adev)
1814 {
1815 	int i, num_xcc;
1816 
1817 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1818 
1819 	for (i = 0; i < num_xcc; i++)
1820 		gfx_v12_1_xcc_config_gfx_rs64(adev, i);
1821 }
1822 
1823 static void gfx_v12_1_xcc_set_mec_ucode_start_addr(struct amdgpu_device *adev,
1824 						   int xcc_id)
1825 {
1826 	const struct gfx_firmware_header_v2_0 *cp_hdr;
1827 	unsigned pipe_id;
1828 
1829 	cp_hdr = (const struct gfx_firmware_header_v2_0 *)
1830 		adev->gfx.mec_fw->data;
1831 	mutex_lock(&adev->srbm_mutex);
1832 	for (pipe_id = 0; pipe_id < adev->gfx.mec.num_pipe_per_mec; pipe_id++) {
1833 		soc_v1_0_grbm_select(adev, 1, pipe_id, 0, 0, GET_INST(GC, xcc_id));
1834 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START,
1835 			     cp_hdr->ucode_start_addr_lo >> 2 |
1836 			     cp_hdr->ucode_start_addr_hi << 30);
1837 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START_HI,
1838 			     cp_hdr->ucode_start_addr_hi >> 2);
1839 	}
1840 	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1841 	mutex_unlock(&adev->srbm_mutex);
1842 }
1843 
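/*
 * Poll CP_STAT and RLC_RLCS_BOOTLOAD_STATUS until the RLC has finished
 * autoloading the GC firmware.
 */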
1844 static int gfx_v12_1_xcc_wait_for_rlc_autoload_complete(struct amdgpu_device *adev,
1845 							int xcc_id)
1846 {
1847 	uint32_t cp_status;
1848 	uint32_t bootload_status;
1849 	int i;
1850 
1851 	for (i = 0; i < adev->usec_timeout; i++) {
1852 		cp_status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_STAT);
1853 		bootload_status = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
1854 					       regRLC_RLCS_BOOTLOAD_STATUS);
1855 
1856 		if ((cp_status == 0) &&
1857 		    (REG_GET_FIELD(bootload_status,
1858 			RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
1859 			break;
1860 		}
1861 		udelay(1);
1862 		if (amdgpu_emu_mode)
1863 			msleep(10);
1864 	}
1865 
1866 	if (i >= adev->usec_timeout) {
1867 		dev_err(adev->dev,
1868 			"rlc autoload: xcc%d gc ucode autoload timeout\n", xcc_id);
1869 		return -ETIMEDOUT;
1870 	}
1871 
1872 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
1873 		gfx_v12_1_xcc_set_mec_ucode_start_addr(adev, xcc_id);
1875 
1876 	return 0;
1877 }
1878 
1879 static int gfx_v12_1_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
1880 {
1881 	int xcc_id, r;
1882 
1883 	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
1884 		r = gfx_v12_1_xcc_wait_for_rlc_autoload_complete(adev, xcc_id);
		if (r)
			return r;
	}
1885 
1886 	return 0;
1887 }
1888 
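/* Halt or run the MEC RS64 pipes and update the KIQ ring scheduler state accordingly */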
1889 static void gfx_v12_1_xcc_cp_compute_enable(struct amdgpu_device *adev,
1890 					    bool enable, int xcc_id)
1891 {
1892 	u32 data;
1893 
1894 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL);
1895 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE,
1896 						 enable ? 0 : 1);
1897 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET,
1898 						 enable ? 0 : 1);
1899 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET,
1900 						 enable ? 0 : 1);
1901 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET,
1902 						 enable ? 0 : 1);
1903 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET,
1904 						 enable ? 0 : 1);
1905 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE,
1906 						 enable ? 1 : 0);
1907 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE,
1908 						 enable ? 1 : 0);
1909 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE,
1910 						 enable ? 1 : 0);
1911 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE,
1912 						 enable ? 1 : 0);
1913 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT,
1914 						 enable ? 0 : 1);
1915 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL, data);
1916 
1917 	adev->gfx.kiq[xcc_id].ring.sched.ready = enable;
1918 
1919 	udelay(50);
1920 }
1921 
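/*
 * Allocate VRAM BOs for the MEC RS64 firmware: one shared instruction
 * image plus a per-pipe, per-XCC copy of the data image.
 */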
1922 static int gfx_v12_1_init_cp_compute_microcode_bo(struct amdgpu_device *adev)
1923 {
1924 	const struct gfx_firmware_header_v2_0 *mec_hdr;
1925 	const __le32 *fw_ucode, *fw_data;
1926 	u32 fw_ucode_size, fw_data_size;
1927 	u32 *fw_ucode_ptr, *fw_data_ptr;
1928 	int i, r, xcc_id;
1929 
1930 	if (!adev->gfx.mec_fw)
1931 		return -EINVAL;
1932 
1933 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
1934 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
1935 
1936 	fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data +
1937 				le32_to_cpu(mec_hdr->ucode_offset_bytes));
1938 	fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes);
1939 
1940 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1941 				le32_to_cpu(mec_hdr->data_offset_bytes));
1942 	fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
1943 
1944 	if (adev->gfx.mec.mec_fw_obj == NULL) {
1945 		r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
1946 					      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
1947 					      &adev->gfx.mec.mec_fw_obj,
1948 					      &adev->gfx.mec.mec_fw_gpu_addr,
1949 					      (void **)&fw_ucode_ptr);
1950 		if (r) {
1951 			dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
1952 			gfx_v12_1_mec_fini(adev);
1953 			return r;
1954 		}
1955 
1956 		memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
1957 
1958 		amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
1959 		amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
1960 	}
1961 
1962 	if (adev->gfx.mec.mec_fw_data_obj == NULL) {
1963 		r = amdgpu_bo_create_reserved(adev,
1964 					      ALIGN(fw_data_size, 64 * 1024) *
1965 					      adev->gfx.mec.num_pipe_per_mec * NUM_XCC(adev->gfx.xcc_mask),
1966 					      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
1967 					      &adev->gfx.mec.mec_fw_data_obj,
1968 					      &adev->gfx.mec.mec_fw_data_gpu_addr,
1969 					      (void **)&fw_data_ptr);
1970 		if (r) {
1971 			dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r);
1972 			gfx_v12_1_mec_fini(adev);
1973 			return r;
1974 		}
1975 
1976 		for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
1977 			for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
1978 				u32 offset = (xcc_id * adev->gfx.mec.num_pipe_per_mec + i) *
1979 					     ALIGN(fw_data_size, 64 * 1024) / 4;
1980 				memcpy(fw_data_ptr + offset, fw_data, fw_data_size);
1981 			}
1982 		}
1983 
1984 		amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
1985 		amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);
1986 	}
1987 
1988 	return 0;
1989 }
1990 
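/*
 * Point the CP MEC instruction and data caches at the firmware BOs,
 * invalidate both caches and program the RS64 ucode start address.
 */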
1991 static int gfx_v12_1_xcc_cp_compute_load_microcode_rs64(struct amdgpu_device *adev,
1992 							int xcc_id)
1993 {
1994 	const struct gfx_firmware_header_v2_0 *mec_hdr;
1995 	u32 fw_data_size;
1996 	u32 tmp, i, usec_timeout = 50000; /* Wait for 50 ms */
1997 
1998 	if (!adev->gfx.mec_fw)
1999 		return -EINVAL;
2000 
2001 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
2002 	fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
2003 
2004 	gfx_v12_1_xcc_cp_compute_enable(adev, false, xcc_id);
2005 
2006 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL);
2007 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2008 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2009 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2010 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp);
2011 
2012 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_BASE_CNTL);
2013 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
2014 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
2015 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_BASE_CNTL, tmp);
2016 
2017 	mutex_lock(&adev->srbm_mutex);
2018 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
2019 		soc_v1_0_grbm_select(adev, 1, i, 0, 0, GET_INST(GC, xcc_id));
2020 
2021 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_MDBASE_LO,
2022 			     lower_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr +
2023 					   (xcc_id * adev->gfx.mec.num_pipe_per_mec + i) *
2024 					   ALIGN(fw_data_size, 64 * 1024)));
2025 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_MDBASE_HI,
2026 			     upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr +
2027 					   (xcc_id * adev->gfx.mec.num_pipe_per_mec + i) *
2028 					   ALIGN(fw_data_size, 64 * 1024)));
2029 
2030 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO,
2031 				lower_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2032 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI,
2033 				upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2034 	}
2035 	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2036 	mutex_unlock(&adev->srbm_mutex);
2037 
2038 	/* Trigger an invalidation of the MEC L1 data cache */
2039 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_OP_CNTL);
2040 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2041 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_OP_CNTL, tmp);
2042 
2043 	/* Wait for invalidation complete */
2044 	for (i = 0; i < usec_timeout; i++) {
2045 		tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_OP_CNTL);
2046 		if (REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
2047 				  INVALIDATE_DCACHE_COMPLETE) == 1)
2048 			break;
2049 		udelay(1);
2050 	}
2051 
2052 	if (i >= usec_timeout) {
2053 		dev_err(adev->dev, "failed to invalidate data cache\n");
2054 		return -EINVAL;
2055 	}
2056 
2057 	/* Trigger an invalidation of the L1 instruction caches */
2058 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_OP_CNTL);
2059 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2060 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_OP_CNTL, tmp);
2061 
2062 	/* Wait for invalidation complete */
2063 	for (i = 0; i < usec_timeout; i++) {
2064 		tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_OP_CNTL);
2065 		if (REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2066 				  INVALIDATE_CACHE_COMPLETE) == 1)
2067 			break;
2068 		udelay(1);
2069 	}
2070 
2071 	if (i >= usec_timeout) {
2072 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2073 		return -EINVAL;
2074 	}
2075 
2076 	gfx_v12_1_xcc_set_mec_ucode_start_addr(adev, xcc_id);
2077 
2078 	return 0;
2079 }
2080 
2081 static void gfx_v12_1_xcc_kiq_setting(struct amdgpu_ring *ring,
2082 				      int xcc_id)
2083 {
2084 	uint32_t tmp;
2085 	struct amdgpu_device *adev = ring->adev;
2086 
2087 	/* tell RLC which is KIQ queue */
2088 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
2089 	tmp &= 0xffffff00;
2090 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2091 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
2092 	tmp |= 0x80;
2093 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
2094 }
2095 
2096 static void gfx_v12_1_xcc_cp_set_doorbell_range(struct amdgpu_device *adev,
2097 						int xcc_id)
2098 {
2099 	/* disable gfx engine doorbell range */
2100 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_DOORBELL_RANGE_LOWER, 0);
2101 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_DOORBELL_RANGE_UPPER, 0);
2102 
2103 	/* set compute engine doorbell range */
2104 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_LOWER,
2105 		     ((adev->doorbell_index.kiq +
2106 		       xcc_id * adev->doorbell_index.xcc_doorbell_range) *
2107 		      2) << 2);
2108 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_UPPER,
2109 		     ((adev->doorbell_index.userqueue_end +
2110 		       xcc_id * adev->doorbell_index.xcc_doorbell_range) *
2111 		      2) << 2);
2112 }
2113 
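/*
 * Fill a compute MQD (memory queue descriptor) from the queue properties.
 * The CP/KIQ later programs the HQD registers from this structure.
 */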
2114 static int gfx_v12_1_compute_mqd_init(struct amdgpu_device *adev, void *m,
2115 				      struct amdgpu_mqd_prop *prop)
2116 {
2117 	struct v12_1_compute_mqd *mqd = m;
2118 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
2119 	uint32_t tmp;
2120 
2121 	mqd->header = 0xC0310800;
2122 	mqd->compute_pipelinestat_enable = 0x00000001;
2123 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2124 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2125 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2126 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2127 	mqd->compute_misc_reserved = 0x00000007;
2128 
2129 	eop_base_addr = prop->eop_gpu_addr >> 8;
2130 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2131 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
2132 
2133 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2134 	tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
2135 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
2136 			(order_base_2(GFX12_MEC_HPD_SIZE / 4) - 1));
2137 
2138 	mqd->cp_hqd_eop_control = tmp;
2139 
2140 	/* enable doorbell? */
2141 	tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
2142 
2143 	if (prop->use_doorbell) {
2144 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2145 				    DOORBELL_OFFSET, prop->doorbell_index);
2146 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2147 				    DOORBELL_EN, 1);
2148 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2149 				    DOORBELL_SOURCE, 0);
2150 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2151 				    DOORBELL_HIT, 0);
2152 	} else {
2153 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2154 				    DOORBELL_EN, 0);
2155 	}
2156 
2157 	mqd->cp_hqd_pq_doorbell_control = tmp;
2158 
2159 	/* disable the queue if it's active */
2160 	mqd->cp_hqd_dequeue_request = 0;
2161 	mqd->cp_hqd_pq_rptr = 0;
2162 	mqd->cp_hqd_pq_wptr_lo = 0;
2163 	mqd->cp_hqd_pq_wptr_hi = 0;
2164 
2165 	/* set the pointer to the MQD */
2166 	mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc;
2167 	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
2168 
2169 	/* set MQD vmid to 0 */
2170 	tmp = regCP_MQD_CONTROL_DEFAULT;
2171 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
2172 	mqd->cp_mqd_control = tmp;
2173 
2174 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2175 	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
2176 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2177 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2178 
2179 	/* set up the HQD, this is similar to CP_RB0_CNTL */
2180 	tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
2181 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
2182 			    (order_base_2(prop->queue_size / 4) - 1));
2183 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
2184 			    (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
2185 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
2186 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
2187 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
2188 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
2189 	mqd->cp_hqd_pq_control = tmp;
2190 
2191 	/* set the wb address whether it's enabled or not */
2192 	wb_gpu_addr = prop->rptr_gpu_addr;
2193 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2194 	mqd->cp_hqd_pq_rptr_report_addr_hi =
2195 		upper_32_bits(wb_gpu_addr) & 0xffff;
2196 
2197 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2198 	wb_gpu_addr = prop->wptr_gpu_addr;
2199 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2200 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2201 
2202 	tmp = 0;
2203 	/* enable the doorbell if requested */
2204 	if (prop->use_doorbell) {
2205 		tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
2206 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2207 				DOORBELL_OFFSET, prop->doorbell_index);
2208 
2209 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2210 				    DOORBELL_EN, 1);
2211 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2212 				    DOORBELL_SOURCE, 0);
2213 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2214 				    DOORBELL_HIT, 0);
2215 	}
2216 
2217 	mqd->cp_hqd_pq_doorbell_control = tmp;
2218 
2219 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2220 	mqd->cp_hqd_pq_rptr = regCP_HQD_PQ_RPTR_DEFAULT;
2221 
2222 	/* set the vmid for the queue */
2223 	mqd->cp_hqd_vmid = 0;
2224 
2225 	tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
2226 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x63);
2227 	mqd->cp_hqd_persistent_state = tmp;
2228 
2229 	/* set MIN_IB_AVAIL_SIZE */
2230 	tmp = regCP_HQD_IB_CONTROL_DEFAULT;
2231 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 1);
2232 	mqd->cp_hqd_ib_control = tmp;
2233 
2234 	/* set static priority for a compute queue/ring */
2235 	mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority;
2236 	mqd->cp_hqd_queue_priority = prop->hqd_queue_priority;
2237 
2238 	mqd->cp_mqd_stride_size = prop->mqd_stride_size ? prop->mqd_stride_size :
2239 		AMDGPU_MQD_SIZE_ALIGN(adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size);
2240 
2241 	mqd->cp_hqd_active = prop->hqd_active;
2242 
2243 	return 0;
2244 }
2245 
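/*
 * Program the HQD registers for the KIQ ring directly via MMIO; the caller
 * must hold srbm_mutex with the queue selected through GRBM.
 */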
2246 static int gfx_v12_1_xcc_kiq_init_register(struct amdgpu_ring *ring,
2247 					   int xcc_id)
2248 {
2249 	struct amdgpu_device *adev = ring->adev;
2250 	struct v12_1_compute_mqd *mqd = ring->mqd_ptr;
2251 	int j;
2252 
2253 	/* inactivate the queue */
2254 	if (amdgpu_sriov_vf(adev))
2255 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
2256 
2257 	/* disable wptr polling */
2258 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
2259 
2260 	/* write the EOP addr */
2261 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
2262 	       mqd->cp_hqd_eop_base_addr_lo);
2263 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
2264 	       mqd->cp_hqd_eop_base_addr_hi);
2265 
2266 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2267 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
2268 	       mqd->cp_hqd_eop_control);
2269 
2270 	/* enable doorbell? */
2271 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
2272 	       mqd->cp_hqd_pq_doorbell_control);
2273 
2274 	/* disable the queue if it's active */
2275 	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
2276 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
2277 		for (j = 0; j < adev->usec_timeout; j++) {
2278 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
2279 				break;
2280 			udelay(1);
2281 		}
2282 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
2283 		       mqd->cp_hqd_dequeue_request);
2284 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
2285 		       mqd->cp_hqd_pq_rptr);
2286 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
2287 		       mqd->cp_hqd_pq_wptr_lo);
2288 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
2289 		       mqd->cp_hqd_pq_wptr_hi);
2290 	}
2291 
2292 	/* set the pointer to the MQD */
2293 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
2294 	       mqd->cp_mqd_base_addr_lo);
2295 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
2296 	       mqd->cp_mqd_base_addr_hi);
2297 
2298 	/* set MQD vmid to 0 */
2299 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
2300 	       mqd->cp_mqd_control);
2301 
2302 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2303 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
2304 	       mqd->cp_hqd_pq_base_lo);
2305 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
2306 	       mqd->cp_hqd_pq_base_hi);
2307 
2308 	/* set up the HQD, this is similar to CP_RB0_CNTL */
2309 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
2310 	       mqd->cp_hqd_pq_control);
2311 
2312 	/* set the wb address whether it's enabled or not */
2313 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR,
2314 		mqd->cp_hqd_pq_rptr_report_addr_lo);
2315 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
2316 		mqd->cp_hqd_pq_rptr_report_addr_hi);
2317 
2318 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2319 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR,
2320 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
2321 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
2322 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
2323 
2324 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
2325 	       mqd->cp_hqd_pq_doorbell_control);
2326 
2327 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2328 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
2329 	       mqd->cp_hqd_pq_wptr_lo);
2330 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
2331 	       mqd->cp_hqd_pq_wptr_hi);
2332 
2333 	/* set the vmid for the queue */
2334 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid);
2335 
2336 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE,
2337 	       mqd->cp_hqd_persistent_state);
2338 
2339 	/* activate the queue */
2340 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE,
2341 	       mqd->cp_hqd_active);
2342 
2343 	if (ring->use_doorbell)
2344 		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2345 
2346 	return 0;
2347 }
2348 
2349 static int gfx_v12_1_xcc_kiq_init_queue(struct amdgpu_ring *ring,
2350 					int xcc_id)
2351 {
2352 	struct amdgpu_device *adev = ring->adev;
2353 	struct v12_1_compute_mqd *mqd = ring->mqd_ptr;
2354 
2355 	gfx_v12_1_xcc_kiq_setting(ring, xcc_id);
2356 
2357 	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
2358 		/* reset MQD to a clean status */
2359 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2360 			memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(*mqd));
2361 
2362 		/* reset ring buffer */
2363 		ring->wptr = 0;
2364 		amdgpu_ring_clear_ring(ring);
2365 
2366 		mutex_lock(&adev->srbm_mutex);
2367 		soc_v1_0_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2368 		gfx_v12_1_xcc_kiq_init_register(ring, xcc_id);
2369 		soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2370 		mutex_unlock(&adev->srbm_mutex);
2371 	} else {
2372 		memset((void *)mqd, 0, sizeof(*mqd));
2373 		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
2374 			amdgpu_ring_clear_ring(ring);
2375 		mutex_lock(&adev->srbm_mutex);
2376 		soc_v1_0_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2377 		amdgpu_ring_init_mqd(ring);
2378 		gfx_v12_1_xcc_kiq_init_register(ring, xcc_id);
2379 		soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2380 		mutex_unlock(&adev->srbm_mutex);
2381 
2382 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2383 			memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(*mqd));
2384 	}
2385 
2386 	return 0;
2387 }
2388 
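/*
 * Initialize a KCQ MQD on first bring-up, or restore the backed-up MQD and
 * reset the ring pointers on resume/reset.
 */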
2389 static int gfx_v12_1_xcc_kcq_init_queue(struct amdgpu_ring *ring,
2390 					int xcc_id)
2391 {
2392 	struct amdgpu_device *adev = ring->adev;
2393 	struct v12_1_compute_mqd *mqd = ring->mqd_ptr;
2394 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
2395 
2396 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2397 		memset((void *)mqd, 0, sizeof(*mqd));
2398 		mutex_lock(&adev->srbm_mutex);
2399 		soc_v1_0_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2400 		amdgpu_ring_init_mqd(ring);
2401 		soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2402 		mutex_unlock(&adev->srbm_mutex);
2403 
2404 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2405 			memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
2406 	} else {
2407 		/* restore MQD to a clean status */
2408 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2409 			memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
2410 		/* reset ring buffer */
2411 		ring->wptr = 0;
2412 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
2413 		amdgpu_ring_clear_ring(ring);
2414 	}
2415 
2416 	return 0;
2417 }
2418 
2419 static int gfx_v12_1_xcc_kiq_resume(struct amdgpu_device *adev,
2420 				    int xcc_id)
2421 {
2422 	struct amdgpu_ring *ring;
2423 	int r;
2424 
2425 	ring = &adev->gfx.kiq[xcc_id].ring;
2426 
2427 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
2428 	if (unlikely(r != 0))
2429 		return r;
2430 
2431 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2432 	if (unlikely(r != 0)) {
2433 		amdgpu_bo_unreserve(ring->mqd_obj);
2434 		return r;
2435 	}
2436 
2437 	gfx_v12_1_xcc_kiq_init_queue(ring, xcc_id);
2438 	amdgpu_bo_kunmap(ring->mqd_obj);
2439 	ring->mqd_ptr = NULL;
2440 	amdgpu_bo_unreserve(ring->mqd_obj);
2441 	ring->sched.ready = true;
2442 	return 0;
2443 }
2444 
2445 static int gfx_v12_1_xcc_kcq_resume(struct amdgpu_device *adev,
2446 				    int xcc_id)
2447 {
2448 	struct amdgpu_ring *ring = NULL;
2449 	int r = 0, i;
2450 
2451 	if (!amdgpu_async_gfx_ring)
2452 		gfx_v12_1_xcc_cp_compute_enable(adev, true, xcc_id);
2453 
2454 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2455 		ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
2456 
2457 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
2458 		if (unlikely(r != 0))
2459 			goto done;
2460 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2461 		if (!r) {
2462 			r = gfx_v12_1_xcc_kcq_init_queue(ring, xcc_id);
2463 			amdgpu_bo_kunmap(ring->mqd_obj);
2464 			ring->mqd_ptr = NULL;
2465 		}
2466 		amdgpu_bo_unreserve(ring->mqd_obj);
2467 		if (r)
2468 			goto done;
2469 	}
2470 
2471 	r = amdgpu_gfx_enable_kcq(adev, xcc_id);
2472 done:
2473 	return r;
2474 }
2475 
2476 static int gfx_v12_1_xcc_cp_resume(struct amdgpu_device *adev, uint16_t xcc_mask)
2477 {
2478 	int r, i, xcc_id;
2479 	struct amdgpu_ring *ring;
2480 
2481 	for_each_inst(xcc_id, xcc_mask) {
2482 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
2483 			/* legacy firmware loading */
2484 			r = gfx_v12_1_xcc_cp_compute_load_microcode_rs64(adev, xcc_id);
2485 			if (r)
2486 				return r;
2487 		}
2488 
2489 		/* GFX CGCG and LS are set by default */
2490 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2491 			gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
2492 
2493 		gfx_v12_1_xcc_cp_set_doorbell_range(adev, xcc_id);
2494 
2495 		gfx_v12_1_xcc_cp_compute_enable(adev, true, xcc_id);
2496 
2497 		if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
2498 			r = amdgpu_mes_kiq_hw_init(adev, xcc_id);
2499 		else
2500 			r = gfx_v12_1_xcc_kiq_resume(adev, xcc_id);
2501 		if (r)
2502 			return r;
2503 
2504 		r = gfx_v12_1_xcc_kcq_resume(adev, xcc_id);
2505 		if (r)
2506 			return r;
2507 
2508 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2509 			ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
2510 			r = amdgpu_ring_test_helper(ring);
2511 			if (r)
2512 				return r;
2513 		}
2514 	}
2515 
2516 	return 0;
2517 }
2518 
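/*
 * Resolve the compute partition mode (XCCs per XCP), then bring up the
 * compute CP on every XCC in the mask.
 */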
2519 static int gfx_v12_1_cp_resume(struct amdgpu_device *adev)
2520 {
2521 	int num_xcc, num_xcp, num_xcc_per_xcp;
2522 	uint16_t xcc_mask;
2523 	int r = 0;
2524 
2525 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2526 	if (amdgpu_sriov_vf(adev)) {
2527 		enum amdgpu_gfx_partition mode;
2528 
2529 		mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2530 						       AMDGPU_XCP_FL_NONE);
2531 		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2532 			return -EINVAL;
2533 		if (adev->gfx.funcs &&
2534 		    adev->gfx.funcs->get_xccs_per_xcp) {
2535 			num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
2536 			adev->gfx.num_xcc_per_xcp = num_xcc_per_xcp;
2537 			num_xcp = num_xcc / num_xcc_per_xcp;
2538 		} else {
2539 			return -EINVAL;
2540 		}
2541 		r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);
2542 
2543 	} else {
2544 		if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2545 						    AMDGPU_XCP_FL_NONE) ==
2546 		    AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2547 			r = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr,
2548 							     amdgpu_user_partt_mode);
2549 	}
2550 
2551 	if (r)
2552 		return r;
2553 
2554 	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
2555 
2556 	return gfx_v12_1_xcc_cp_resume(adev, xcc_mask);
2557 }
2558 
2559 static int gfx_v12_1_gfxhub_enable(struct amdgpu_device *adev)
2560 {
2561 	int r, i;
2562 	bool value;
2563 
2564 	r = adev->gfxhub.funcs->gart_enable(adev);
2565 	if (r)
2566 		return r;
2567 
2568 	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
2569 		false : true;
2570 
2571 	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
2572 	/* TODO investigate why TLB flush is needed,
2573 	 * are we missing a flush somewhere else? */
2574 	for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
2575 		if (AMDGPU_IS_GFXHUB(i))
2576 			adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(i), 0);
2577 	}
2578 
2579 	return 0;
2580 }
2581 
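/* Read GB_ADDR_CONFIG from the first XCC and decode its fields into the gfx config */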
2582 static int get_gb_addr_config(struct amdgpu_device *adev)
2583 {
2584 	u32 gb_addr_config;
2585 
2586 	gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG_READ);
2587 	if (gb_addr_config == 0)
2588 		return -EINVAL;
2589 
2590 	adev->gfx.config.gb_addr_config_fields.num_pkrs =
2591 		1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG_READ, NUM_PKRS);
2592 
2593 	adev->gfx.config.gb_addr_config = gb_addr_config;
2594 
2595 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
2596 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2597 				      GB_ADDR_CONFIG_READ, NUM_PIPES);
2598 
2599 	adev->gfx.config.max_tile_pipes =
2600 		adev->gfx.config.gb_addr_config_fields.num_pipes;
2601 
2602 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
2603 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2604 				      GB_ADDR_CONFIG_READ, MAX_COMPRESSED_FRAGS);
2605 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
2606 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2607 				      GB_ADDR_CONFIG_READ, NUM_RB_PER_SE);
2608 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
2609 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2610 				      GB_ADDR_CONFIG_READ, NUM_SHADER_ENGINES);
2611 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
2612 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2613 				      GB_ADDR_CONFIG_READ, PIPE_INTERLEAVE_SIZE));
2614 
2615 	return 0;
2616 }
2617 
2618 static void gfx_v12_1_xcc_disable_gpa_mode(struct amdgpu_device *adev,
2619 					   int xcc_id)
2620 {
2621 	uint32_t data;
2622 
2623 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
2624 	data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
2625 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
2626 
2627 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPG_PSP_DEBUG);
2628 	data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK;
2629 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPG_PSP_DEBUG, data);
2630 }
2631 
2632 static void gfx_v12_1_xcc_setup_tcp_thrashing_ctrl(struct amdgpu_device *adev,
2633 					 int xcc_id)
2634 {
2635 	uint32_t val;
2636 
2637 	/* Configure the TCP UTCL0 thrashing control register */
2638 	val = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2639 					regTCP_UTCL0_THRASHING_CTRL);
2640 	val = REG_SET_FIELD(val, TCP_UTCL0_THRASHING_CTRL, THRASHING_EN, 0x2);
2641 	val = REG_SET_FIELD(val, TCP_UTCL0_THRASHING_CTRL,
2642 					RETRY_FRAGMENT_THRESHOLD_UP_EN, 0x1);
2643 	val = REG_SET_FIELD(val, TCP_UTCL0_THRASHING_CTRL,
2644 					RETRY_FRAGMENT_THRESHOLD_DOWN_EN, 0x1);
2645 
2646 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2647 					regTCP_UTCL0_THRASHING_CTRL, val);
2648 }
2649 
2650 static void gfx_v12_1_xcc_enable_atomics(struct amdgpu_device *adev,
2651 					 int xcc_id)
2652 {
2653 	uint32_t data;
2654 
2655 	/* Set the TCP UTCL0 register to enable atomics */
2656 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_UTCL0_CNTL1);
2657 	data = REG_SET_FIELD(data, TCP_UTCL0_CNTL1, ATOMIC_REQUESTER_EN, 0x1);
2658 
2659 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_UTCL0_CNTL1, data);
2660 }
2661 
2662 static void gfx_v12_1_xcc_disable_burst(struct amdgpu_device *adev,
2663 					int xcc_id)
2664 {
2665 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGL1_DRAM_BURST_CTRL, 0xf);
2666 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGLARB_DRAM_BURST_CTRL, 0xf);
2667 }
2668 
2669 static void gfx_v12_1_xcc_disable_early_write_ack(struct amdgpu_device *adev,
2670 					int xcc_id)
2671 {
2672 	uint32_t data;
2673 
2674 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_CNTL3);
2675 	data = REG_SET_FIELD(data, TCP_CNTL3, DISABLE_EARLY_WRITE_ACK, 0x1);
2676 
2677 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_CNTL3, data);
2678 }
2679 
2680 static void gfx_v12_1_xcc_disable_tcp_spill_cache(struct amdgpu_device *adev,
2681 					int xcc_id)
2682 {
2683 	uint32_t data;
2684 
2685 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_CNTL);
2686 	data = REG_SET_FIELD(data, TCP_CNTL, TCP_SPILL_CACHE_DISABLE, 0x1);
2687 
2688 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_CNTL, data);
2689 }
2690 
2691 static void gfx_v12_1_init_golden_registers(struct amdgpu_device *adev)
2692 {
2693 	int i;
2694 
2695 	for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); i++) {
2696 		gfx_v12_1_xcc_disable_burst(adev, i);
2697 		gfx_v12_1_xcc_enable_atomics(adev, i);
2698 		gfx_v12_1_xcc_setup_tcp_thrashing_ctrl(adev, i);
2699 		gfx_v12_1_xcc_disable_early_write_ack(adev, i);
2700 		gfx_v12_1_xcc_disable_tcp_spill_cache(adev, i);
2701 	}
2702 }
2703 
2704 static int gfx_v12_1_hw_init(struct amdgpu_ip_block *ip_block)
2705 {
2706 	int r, i, num_xcc;
2707 	struct amdgpu_device *adev = ip_block->adev;
2708 
2709 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
2710 		/* rlc autoload firmware */
2711 		r = gfx_v12_1_rlc_backdoor_autoload_enable(adev);
2712 		if (r)
2713 			return r;
2714 	} else {
2715 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
2716 			num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2717 
2718 			if (adev->gfx.imu.funcs) {
2719 				if (adev->gfx.imu.funcs->load_microcode)
2720 					adev->gfx.imu.funcs->load_microcode(adev);
2721 			}
2722 
2723 			for (i = 0; i < num_xcc; i++) {
2724 				/* disable gpa mode in backdoor loading */
2725 				gfx_v12_1_xcc_disable_gpa_mode(adev, i);
2726 			}
2727 		}
2728 	}
2729 
2730 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
2731 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2732 		r = gfx_v12_1_wait_for_rlc_autoload_complete(adev);
2733 		if (r) {
2734 			dev_err(adev->dev, "(%d) failed to wait rlc autoload complete\n", r);
2735 			return r;
2736 		}
2737 	}
2738 
2739 	adev->gfx.is_poweron = true;
2740 
2741 	if (get_gb_addr_config(adev))
2742 		DRM_WARN("Invalid gb_addr_config!\n");
2743 
2744 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2745 		gfx_v12_1_config_gfx_rs64(adev);
2746 
2747 	r = gfx_v12_1_gfxhub_enable(adev);
2748 	if (r)
2749 		return r;
2750 
2751 	gfx_v12_1_init_golden_registers(adev);
2752 
2753 	gfx_v12_1_constants_init(adev);
2754 
2755 	if (adev->nbio.funcs->gc_doorbell_init)
2756 		adev->nbio.funcs->gc_doorbell_init(adev);
2757 
2758 	r = gfx_v12_1_rlc_resume(adev);
2759 	if (r)
2760 		return r;
2761 
2762 	/*
2763 	 * Golden register init and RLC resume may override some registers,
2764 	 * so reconfigure them here.
2765 	 */
2766 	gfx_v12_1_tcp_harvest(adev);
2767 
2768 	r = gfx_v12_1_cp_resume(adev);
2769 	if (r)
2770 		return r;
2771 
2772 	return r;
2773 }
2774 
2775 static void gfx_v12_1_xcc_fini(struct amdgpu_device *adev,
2776 			      int xcc_id)
2777 {
2778 	uint32_t tmp;
2779 
2780 	if (!adev->no_hw_access) {
2781 		if (amdgpu_gfx_disable_kcq(adev, xcc_id))
2782 			DRM_ERROR("KCQ disable failed\n");
2783 
2784 		amdgpu_mes_kiq_hw_fini(adev, xcc_id);
2785 	}
2786 
2787 	if (amdgpu_sriov_vf(adev)) {
2788 		/* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
2789 		tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
2790 		tmp &= 0xffffff00;
2791 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
2792 	}
2793 	gfx_v12_1_xcc_cp_compute_enable(adev, false, xcc_id);
2794 	gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
2795 }
2796 
2797 static int gfx_v12_1_hw_fini(struct amdgpu_ip_block *ip_block)
2798 {
2799 	struct amdgpu_device *adev = ip_block->adev;
2800 	int i, num_xcc;
2801 
2802 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
2803 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
2804 
2805 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2806 	for (i = 0; i < num_xcc; i++)
2807 		gfx_v12_1_xcc_fini(adev, i);
2809 
2810 	adev->gfxhub.funcs->gart_disable(adev);
2811 
2812 	adev->gfx.is_poweron = false;
2813 
2814 	return 0;
2815 }
2816 
2817 static int gfx_v12_1_suspend(struct amdgpu_ip_block *ip_block)
2818 {
2819 	return gfx_v12_1_hw_fini(ip_block);
2820 }
2821 
2822 static int gfx_v12_1_resume(struct amdgpu_ip_block *ip_block)
2823 {
2824 	return gfx_v12_1_hw_init(ip_block);
2825 }
2826 
2827 static bool gfx_v12_1_is_idle(struct amdgpu_ip_block *ip_block)
2828 {
2829 	struct amdgpu_device *adev = ip_block->adev;
2830 	int i, num_xcc;
2831 
2832 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2833 	for (i = 0; i < num_xcc; i++) {
2834 		if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i),
2835 				regGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE))
2836 			return false;
2837 	}
2838 	return true;
2839 }
2840 
2841 static int gfx_v12_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
2842 {
2843 	unsigned i;
2844 	struct amdgpu_device *adev = ip_block->adev;
2845 
2846 	for (i = 0; i < adev->usec_timeout; i++) {
2847 		if (gfx_v12_1_is_idle(ip_block))
2848 			return 0;
2849 		udelay(1);
2850 	}
2851 	return -ETIMEDOUT;
2852 }
2853 
2854 static uint64_t gfx_v12_1_get_gpu_clock_counter(struct amdgpu_device *adev)
2855 {
2856 	uint64_t clock = 0;
2857 
2858 	if (adev->smuio.funcs &&
2859 	    adev->smuio.funcs->get_gpu_clock_counter)
2860 		clock = adev->smuio.funcs->get_gpu_clock_counter(adev);
2861 	else
2862 		dev_warn(adev->dev, "GPU clock counter query is not supported\n");
2863 
2864 	return clock;
2865 }
2866 
2867 static int gfx_v12_1_early_init(struct amdgpu_ip_block *ip_block)
2868 {
2869 	struct amdgpu_device *adev = ip_block->adev;
2870 
2871 	adev->gfx.funcs = &gfx_v12_1_gfx_funcs;
2872 
2873 	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
2874 					  AMDGPU_MAX_COMPUTE_RINGS);
2875 
2876 	gfx_v12_1_set_kiq_pm4_funcs(adev);
2877 	gfx_v12_1_set_ring_funcs(adev);
2878 	gfx_v12_1_set_irq_funcs(adev);
2879 	gfx_v12_1_set_rlc_funcs(adev);
2880 	gfx_v12_1_set_mqd_funcs(adev);
2881 	gfx_v12_1_set_imu_funcs(adev);
2882 
2883 	gfx_v12_1_init_rlcg_reg_access_ctrl(adev);
2884 
2885 	return gfx_v12_1_init_microcode(adev);
2886 }
2887 
2888 static int gfx_v12_1_late_init(struct amdgpu_ip_block *ip_block)
2889 {
2890 	struct amdgpu_device *adev = ip_block->adev;
2891 	int r;
2892 
2893 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
2894 	if (r)
2895 		return r;
2896 
2897 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
2898 	if (r)
2899 		return r;
2900 
2901 	return 0;
2902 }
2903 
2904 static bool gfx_v12_1_is_rlc_enabled(struct amdgpu_device *adev)
2905 {
2906 	uint32_t rlc_cntl;
2907 
2908 	/* if RLC is not enabled, do nothing */
2909 	rlc_cntl = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
2910 	return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false;
2911 }
2912 
2913 static void gfx_v12_1_xcc_set_safe_mode(struct amdgpu_device *adev,
2914 					int xcc_id)
2915 {
2916 	uint32_t data;
2917 	unsigned i;
2918 
2919 	data = RLC_SAFE_MODE__CMD_MASK;
2920 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
2921 
2922 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
2923 
2924 	/* wait for RLC_SAFE_MODE */
2925 	for (i = 0; i < adev->usec_timeout; i++) {
2926 		if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2927 						regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
2928 			break;
2929 		udelay(1);
2930 	}
2931 }
2932 
2933 static void gfx_v12_1_xcc_unset_safe_mode(struct amdgpu_device *adev,
2934 					  int xcc_id)
2935 {
2936 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2937 		     regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK);
2938 }
2939 
2940 static void gfx_v12_1_update_perf_clk(struct amdgpu_device *adev,
2941 				      bool enable)
2942 {
2943 	int i, num_xcc;
2944 
2945 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2946 	for (i = 0; i < num_xcc; i++)
2947 		gfx_v12_1_xcc_update_perf_clk(adev, enable, i);
2948 }
2949 
2950 static void gfx_v12_1_update_spm_vmid(struct amdgpu_device *adev,
2951 				      int xcc_id,
2952 				      struct amdgpu_ring *ring,
2953 				      unsigned vmid)
2954 {
2955 	u32 reg, data;
2956 
2957 	reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL);
2958 	if (amdgpu_sriov_is_pp_one_vf(adev))
2959 		data = RREG32_NO_KIQ(reg);
2960 	else
2961 		data = RREG32(reg);
2962 
2963 	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
2964 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
2965 
2966 	if (amdgpu_sriov_is_pp_one_vf(adev))
2967 		WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL, data);
2968 	else
2969 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL, data);
2970 
2971 	if (ring
2972 	    && amdgpu_sriov_is_pp_one_vf(adev)
2973 	    && ((ring->funcs->type == AMDGPU_RING_TYPE_GFX)
2974 		|| (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE))) {
2976 		amdgpu_ring_emit_wreg(ring, reg, data);
2977 	}
2978 }
2979 
2980 static const struct amdgpu_rlc_funcs gfx_v12_1_rlc_funcs = {
2981 	.is_rlc_enabled = gfx_v12_1_is_rlc_enabled,
2982 	.set_safe_mode = gfx_v12_1_xcc_set_safe_mode,
2983 	.unset_safe_mode = gfx_v12_1_xcc_unset_safe_mode,
2984 	.init = gfx_v12_1_rlc_init,
2985 	.get_csb_size = gfx_v12_1_get_csb_size,
2986 	.get_csb_buffer = gfx_v12_1_get_csb_buffer,
2987 	.resume = gfx_v12_1_rlc_resume,
2988 	.stop = gfx_v12_1_rlc_stop,
2989 	.reset = gfx_v12_1_rlc_reset,
2990 	.start = gfx_v12_1_rlc_start,
2991 	.update_spm_vmid = gfx_v12_1_update_spm_vmid,
2992 };
2993 
2994 #if 0
2995 static void gfx_v12_cntl_power_gating(struct amdgpu_device *adev, bool enable)
2996 {
2997 	/* TODO */
2998 }
2999 
3000 static void gfx_v12_cntl_pg(struct amdgpu_device *adev, bool enable)
3001 {
3002 	/* TODO */
3003 }
3004 #endif
3005 
3006 static int gfx_v12_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
3007 					   enum amd_powergating_state state)
3008 {
3009 	struct amdgpu_device *adev = ip_block->adev;
3010 	bool enable = (state == AMD_PG_STATE_GATE);
3011 
3012 	if (amdgpu_sriov_vf(adev))
3013 		return 0;
3014 
3015 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
3016 	case IP_VERSION(12, 1, 0):
3017 		amdgpu_gfx_off_ctrl(adev, enable);
3018 		break;
3019 	default:
3020 		break;
3021 	}
3022 
3023 	return 0;
3024 }
3025 
3026 static void gfx_v12_1_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
3027 							   bool enable, int xcc_id)
3028 {
3029 	uint32_t def, data;
3030 
3031 	if (!(adev->cg_flags &
3032 	      (AMD_CG_SUPPORT_GFX_CGCG |
3033 	      AMD_CG_SUPPORT_GFX_CGLS |
3034 	      AMD_CG_SUPPORT_GFX_3D_CGCG |
3035 	      AMD_CG_SUPPORT_GFX_3D_CGLS)))
3036 		return;
3037 
3038 	if (enable) {
3039 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
3040 					  regRLC_CGTT_MGCG_OVERRIDE);
3041 
3042 		/* unset CGCG override */
3043 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
3044 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
3045 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3046 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3047 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG ||
3048 		    adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
3049 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
3050 
3051 		/* update CGCG override bits */
3052 		if (def != data)
3053 			WREG32_SOC15(GC, GET_INST(GC, xcc_id),
3054 				     regRLC_CGTT_MGCG_OVERRIDE, data);
3055 
3056 		/* enable cgcg FSM(0x0000363F) */
3057 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
3058 
3059 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
3060 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK;
3061 			data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3062 				 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
3063 		}
3064 
3065 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
3066 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK;
3067 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3068 				 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3069 		}
3070 
3071 		if (def != data)
3072 			WREG32_SOC15(GC, GET_INST(GC, xcc_id),
3073 				     regRLC_CGCG_CGLS_CTRL, data);
3074 
3075 		/* set IDLE_POLL_COUNT(0x00900100) */
3076 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
3077 
3078 		data &= ~CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK;
3079 		data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
3080 		data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3081 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3082 
3083 		if (def != data)
3084 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
3085 
3086 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL);
3087 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
3088 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
3089 		data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
3090 		data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
3091 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL, data);
3092 	} else {
3093 		/* Program RLC_CGCG_CGLS_CTRL */
3094 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
3095 
3096 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
3097 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
3098 
3099 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3100 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3101 
3102 		if (def != data)
3103 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
3104 	}
3105 }
3106 
3107 static void gfx_v12_1_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev,
3108 							   bool enable, int xcc_id)
3109 {
3110 	uint32_t data, def;

3111 	if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)))
3112 		return;
3113 
3114 	/* It is disabled by HW by default */
3115 	if (enable) {
3116 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
3117 			/* 1 - RLC_CGTT_MGCG_OVERRIDE */
3118 			def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3119 
3120 			data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3121 				  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
3122 				  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
3123 
3124 			if (def != data)
3125 				WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3126 		}
3127 	} else {
3128 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
3129 			def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3130 
3131 			data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
3132 				 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3133 				 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
3134 
3135 			if (def != data)
3136 				WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3137 		}
3138 	}
3139 }
3140 
3141 static void gfx_v12_1_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
3142 					       bool enable, int xcc_id)
3143 {
3144 	uint32_t def, data;
3145 
3146 	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
3147 		return;
3148 
3149 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3150 
3151 	if (enable)
3152 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK |
3153 				  RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK);
3154 	else
3155 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK |
3156 				RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK;
3157 
3158 	if (def != data)
3159 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3160 }
3161 
3162 static void gfx_v12_1_xcc_update_sram_fgcg(struct amdgpu_device *adev,
3163 					   bool enable, int xcc_id)
3164 {
3165 	uint32_t def, data;
3166 
3167 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
3168 		return;
3169 
3170 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3171 
3172 	if (enable)
3173 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
3174 	else
3175 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
3176 
3177 	if (def != data)
3178 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3179 }
3180 
3181 static void gfx_v12_1_xcc_update_perf_clk(struct amdgpu_device *adev,
3182 					  bool enable, int xcc_id)
3183 {
3184 	uint32_t def, data;
3185 
3186 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK))
3187 		return;
3188 
3189 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3190 
3191 	if (enable)
3192 		data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
3193 	else
3194 		data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
3195 
3196 	if (def != data)
3197 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3198 }
3199 
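/*
 * Apply all clock gating updates with the RLC in safe mode, and reconfigure
 * the GUI idle interrupt only when at least one CG feature is enabled.
 */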
3200 static int gfx_v12_1_xcc_update_gfx_clock_gating(struct amdgpu_device *adev,
3201 					     bool enable, int xcc_id)
3202 {
3203 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
3204 
3205 	gfx_v12_1_xcc_update_coarse_grain_clock_gating(adev, enable, xcc_id);
3206 
3207 	gfx_v12_1_xcc_update_medium_grain_clock_gating(adev, enable, xcc_id);
3208 
3209 	gfx_v12_1_xcc_update_repeater_fgcg(adev, enable, xcc_id);
3210 
3211 	gfx_v12_1_xcc_update_sram_fgcg(adev, enable, xcc_id);
3212 
3213 	gfx_v12_1_xcc_update_perf_clk(adev, enable, xcc_id);
3214 
3215 	if (adev->cg_flags &
3216 	    (AMD_CG_SUPPORT_GFX_MGCG |
3217 	     AMD_CG_SUPPORT_GFX_CGLS |
3218 	     AMD_CG_SUPPORT_GFX_CGCG |
3219 	     AMD_CG_SUPPORT_GFX_3D_CGCG |
3220 	     AMD_CG_SUPPORT_GFX_3D_CGLS))
3221 		gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, enable, xcc_id);
3222 
3223 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
3224 
3225 	return 0;
3226 }
3227 
3228 static int gfx_v12_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
3229 					   enum amd_clockgating_state state)
3230 {
3231 	struct amdgpu_device *adev = ip_block->adev;
3232 	int i, num_xcc;
3233 
3234 	if (amdgpu_sriov_vf(adev))
3235 		return 0;
3236 
3237 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3238 	switch (adev->ip_versions[GC_HWIP][0]) {
3239 	case IP_VERSION(12, 1, 0):
3240 		for (i = 0; i < num_xcc; i++)
3241 			gfx_v12_1_xcc_update_gfx_clock_gating(adev,
3242 				  state == AMD_CG_STATE_GATE, i);
3243 		break;
3244 	default:
3245 		break;
3246 	}
3247 
3248 	return 0;
3249 }
3250 
3251 static void gfx_v12_1_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
3252 {
3253 	struct amdgpu_device *adev = ip_block->adev;
3254 	int data;
3255 
3256 	/* AMD_CG_SUPPORT_GFX_MGCG */
3257 	data = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE);
3258 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
3259 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
3260 
3261 	/* AMD_CG_SUPPORT_REPEATER_FGCG */
3262 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK))
3263 		*flags |= AMD_CG_SUPPORT_REPEATER_FGCG;
3264 
3265 	/* AMD_CG_SUPPORT_GFX_FGCG */
3266 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK))
3267 		*flags |= AMD_CG_SUPPORT_GFX_FGCG;
3268 
3269 	/* AMD_CG_SUPPORT_GFX_PERF_CLK */
3270 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK))
3271 		*flags |= AMD_CG_SUPPORT_GFX_PERF_CLK;
3272 
3273 	/* AMD_CG_SUPPORT_GFX_CGCG */
3274 	data = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL);
3275 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
3276 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
3277 
3278 	/* AMD_CG_SUPPORT_GFX_CGLS */
3279 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
3280 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
3281 }
3282 
3283 static u64 gfx_v12_1_ring_get_rptr_compute(struct amdgpu_ring *ring)
3284 {
3285 	/* the gfx12 hardware rptr is only 32 bits wide */
3286 	return *(uint32_t *)ring->rptr_cpu_addr;
3287 }
3288 
3289 static u64 gfx_v12_1_ring_get_wptr_compute(struct amdgpu_ring *ring)
3290 {
3291 	u64 wptr;
3292 
3293 	/* XXX check if swapping is necessary on BE */
3294 	if (ring->use_doorbell)
3295 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
3296 	else
3297 		BUG();
3298 	return wptr;
3299 }
3300 
3301 static void gfx_v12_1_ring_set_wptr_compute(struct amdgpu_ring *ring)
3302 {
3303 	struct amdgpu_device *adev = ring->adev;
3304 
3305 	/* XXX check if swapping is necessary on BE */
3306 	if (ring->use_doorbell) {
3307 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
3308 			     ring->wptr);
3309 		WDOORBELL64(ring->doorbell_index, ring->wptr);
3310 	} else {
3311 		BUG(); /* only DOORBELL method supported on gfx12 now */
3312 	}
3313 }
3314 
3315 static void gfx_v12_1_ring_emit_ib_compute(struct amdgpu_ring *ring,
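/*
 * Emit an INDIRECT_BUFFER packet for the IB: the control word packs the IB
 * length in dwords together with the VMID (bits 31:24) it executes under.
 */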
3316 					   struct amdgpu_job *job,
3317 					   struct amdgpu_ib *ib,
3318 					   uint32_t flags)
3319 {
3320 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
3321 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
3322 
3323 	/* Currently there is a high likelihood of a wave ID mismatch
3324 	 * between ME and GDS, leading to a HW deadlock, because ME generates
3325 	 * different wave IDs than the GDS expects. This situation happens
3326 	 * randomly when at least 5 compute pipes use GDS ordered append.
3327 	 * The wave IDs generated by ME are also wrong after suspend/resume.
3328 	 * Those are probably bugs somewhere else in the kernel driver.
3329 	 *
3330 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
3331 	 * GDS to 0 for this ring (me/pipe).
3332 	 */
3333 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
3334 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3335 		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
3336 	}
3337 
3338 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3339 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3340 	amdgpu_ring_write(ring,
3341 #ifdef __BIG_ENDIAN
3342 				(2 << 0) |
3343 #endif
3344 				lower_32_bits(ib->gpu_addr));
3345 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3346 	amdgpu_ring_write(ring, control);
3347 }
3348 
3349 static void gfx_v12_1_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
3350 				     u64 seq, unsigned flags)
3351 {
3352 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
3353 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
3354 
3355 	/* RELEASE_MEM - flush caches, send int */
3356 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
3357 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ(1) |
3358 				 PACKET3_RELEASE_MEM_GCR_GLV_WB |
3359 				 PACKET3_RELEASE_MEM_GCR_GL2_WB |
3360 				 PACKET3_RELEASE_MEM_GCR_GL2_SCOPE(2) |
3361 				 PACKET3_RELEASE_MEM_TEMPORAL(3) |
3362 				 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
3363 				 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
3364 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
3365 				 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));
3366 
3367 	/*
3368 	 * The address must be qword aligned for a 64-bit write, or dword
3369 	 * aligned if only the low 32 bits are written (data high is discarded).
3370 	 */
3371 	if (write64bit)
3372 		BUG_ON(addr & 0x7);
3373 	else
3374 		BUG_ON(addr & 0x3);
3375 	amdgpu_ring_write(ring, lower_32_bits(addr));
3376 	amdgpu_ring_write(ring, upper_32_bits(addr));
3377 	amdgpu_ring_write(ring, lower_32_bits(seq));
3378 	amdgpu_ring_write(ring, upper_32_bits(seq));
3379 	amdgpu_ring_write(ring, 0);
3380 }
3381 
3382 static void gfx_v12_1_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
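/*
 * Stall the ring until its own fence writeback memory reaches sync_seq, so
 * that everything previously submitted on this ring has retired before the
 * work that follows.
 */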
3383 {
3384 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3385 	uint32_t seq = ring->fence_drv.sync_seq;
3386 	uint64_t addr = ring->fence_drv.gpu_addr;
3387 
3388 	gfx_v12_1_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
3389 			       upper_32_bits(addr), seq, 0xffffffff, 4);
3390 }
3391 
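/* Ask the CP to invalidate TLB entries for @pasid on one or all VM hubs. */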
3392 static void gfx_v12_1_ring_invalidate_tlbs(struct amdgpu_ring *ring,
3393 				   uint16_t pasid, uint32_t flush_type,
3394 				   bool all_hub, uint8_t dst_sel)
3395 {
3396 	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
3397 	amdgpu_ring_write(ring,
3398 			  PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) |
3399 			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
3400 			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
3401 			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
3402 }
3403 
3404 static void gfx_v12_1_ring_emit_vm_flush(struct amdgpu_ring *ring,
3405 					 unsigned vmid, uint64_t pd_addr)
3406 {
3407 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
3408 
3409 	/* compute doesn't have PFP */
3410 	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
3411 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
3412 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3413 		amdgpu_ring_write(ring, 0x0);
3414 	}
3415 }
3416 
3417 static void gfx_v12_1_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
3418 					  u64 seq, unsigned int flags)
3419 {
3420 	struct amdgpu_device *adev = ring->adev;
3421 
3422 	/* only 32 bits are allocated for each fence seq writeback slot */
3423 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
3424 
3425 	/* write fence seq to the "addr" */
3426 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3427 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3428 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
3429 	amdgpu_ring_write(ring, lower_32_bits(addr));
3430 	amdgpu_ring_write(ring, upper_32_bits(addr));
3431 	amdgpu_ring_write(ring, lower_32_bits(seq));
3432 
3433 	if (flags & AMDGPU_FENCE_FLAG_INT) {
3434 		/* set register to trigger INT */
3435 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3436 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3437 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
3438 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS));
3439 		amdgpu_ring_write(ring, 0);
3440 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
3441 	}
3442 }
3443 
3444 static void gfx_v12_1_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
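/*
 * Read a register through the CP: COPY_DATA copies the register contents
 * into the writeback buffer at dword slot reg_val_offs, where the caller
 * picks it up after the packet has executed.
 */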
3445 				     uint32_t reg_val_offs)
3446 {
3447 	struct amdgpu_device *adev = ring->adev;
3448 
3449 	reg = soc_v1_0_normalize_xcc_reg_offset(reg);
3450 
3451 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
3452 	amdgpu_ring_write(ring, 0 |	/* src: register */
3453 				(5 << 8) |	/* dst: memory */
3454 				(1 << 20));	/* write confirm */
3455 	amdgpu_ring_write(ring, reg);
3456 	amdgpu_ring_write(ring, 0);
3457 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
3458 				reg_val_offs * 4));
3459 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
3460 				reg_val_offs * 4));
3461 }
3462 
3463 static void gfx_v12_1_ring_emit_wreg(struct amdgpu_ring *ring,
3464 				     uint32_t reg,
3465 				     uint32_t val)
3466 {
3467 	uint32_t cmd = 0;
3468 
3469 	reg = soc_v1_0_normalize_xcc_reg_offset(reg);
3470 
3471 	switch (ring->funcs->type) {
3472 	case AMDGPU_RING_TYPE_KIQ:
3473 		cmd = (1 << 16); /* no inc addr */
3474 		break;
3475 	default:
3476 		cmd = WR_CONFIRM;
3477 		break;
3478 	}
3479 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3480 	amdgpu_ring_write(ring, cmd);
3481 	amdgpu_ring_write(ring, reg);
3482 	amdgpu_ring_write(ring, 0);
3483 	amdgpu_ring_write(ring, val);
3484 }
3485 
3486 static void gfx_v12_1_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
3487 					uint32_t val, uint32_t mask)
3488 {
3489 	gfx_v12_1_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
3490 }
3491 
3492 static void gfx_v12_1_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
3493 						   uint32_t reg0, uint32_t reg1,
3494 						   uint32_t ref, uint32_t mask)
3495 {
3496 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3497 
3498 	gfx_v12_1_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
3499 			       ref, mask, 0x20);
3500 }
3501 
3502 static void gfx_v12_1_xcc_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
3503 							int me, int pipe,
3504 							enum amdgpu_interrupt_state state,
3505 							int xcc_id)
3506 {
3507 	u32 mec_int_cntl, mec_int_cntl_reg;
3508 
3509 	/*
3510 	 * amdgpu controls only the first MEC. That's why this function only
3511 	 * handles the setting of interrupts for this specific MEC. All other
3512 	 * pipes' interrupts are set by amdkfd.
3513 	 */
3514 
3515 	if (me == 1) {
3516 		switch (pipe) {
3517 		case 0:
3518 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3519 					GC, GET_INST(GC, xcc_id),
3520 					regCP_ME1_PIPE0_INT_CNTL);
3521 			break;
3522 		case 1:
3523 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3524 					GC, GET_INST(GC, xcc_id),
3525 					regCP_ME1_PIPE1_INT_CNTL);
3526 			break;
3527 		case 2:
3528 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3529 					GC, GET_INST(GC, xcc_id),
3530 					regCP_ME1_PIPE2_INT_CNTL);
3531 			break;
3532 		case 3:
3533 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3534 					GC, GET_INST(GC, xcc_id),
3535 					regCP_ME1_PIPE3_INT_CNTL);
3536 			break;
3537 		default:
3538 			DRM_DEBUG("invalid pipe %d\n", pipe);
3539 			return;
3540 		}
3541 	} else {
3542 		DRM_DEBUG("invalid me %d\n", me);
3543 		return;
3544 	}
3545 
3546 	switch (state) {
3547 	case AMDGPU_IRQ_STATE_DISABLE:
3548 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3549 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3550 					     TIME_STAMP_INT_ENABLE, 0);
3551 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3552 					     GENERIC0_INT_ENABLE, 0);
3553 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3554 		break;
3555 	case AMDGPU_IRQ_STATE_ENABLE:
3556 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3557 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3558 					     TIME_STAMP_INT_ENABLE, 1);
3559 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3560 					     GENERIC0_INT_ENABLE, 1);
3561 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3562 		break;
3563 	default:
3564 		break;
3565 	}
3566 }
3567 
3568 static int gfx_v12_1_set_eop_interrupt_state(struct amdgpu_device *adev,
3569 					    struct amdgpu_irq_src *src,
3570 					    unsigned type,
3571 					    enum amdgpu_interrupt_state state)
3572 {
3573 	int i, num_xcc;
3574 
3575 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3576 	for (i = 0; i < num_xcc; i++) {
3577 		switch (type) {
3578 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
3579 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3580 					adev, 1, 0, state, i);
3581 			break;
3582 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
3583 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3584 					adev, 1, 1, state, i);
3585 			break;
3586 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
3587 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3588 					adev, 1, 2, state, i);
3589 			break;
3590 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
3591 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3592 					adev, 1, 3, state, i);
3593 			break;
3594 		default:
3595 			break;
3596 		}
3597 	}
3598 
3599 	return 0;
3600 }
3601 
3602 static int gfx_v12_1_eop_irq(struct amdgpu_device *adev,
3603 			     struct amdgpu_irq_src *source,
3604 			     struct amdgpu_iv_entry *entry)
3605 {
3606 	u32 doorbell_offset = entry->src_data[0];
3607 	u8 me_id, pipe_id, queue_id;
3608 	struct amdgpu_ring *ring;
3609 	int i, xcc_id;
3610 
3611 	DRM_DEBUG("IH: CP EOP\n");
3612 
3613 	if (adev->enable_mes && doorbell_offset) {
3614 		struct amdgpu_userq_fence_driver *fence_drv = NULL;
3615 		struct xarray *xa = &adev->userq_xa;
3616 		unsigned long flags;
3617 
3618 		xa_lock_irqsave(xa, flags);
3619 		fence_drv = xa_load(xa, doorbell_offset);
3620 		if (fence_drv)
3621 			amdgpu_userq_fence_driver_process(fence_drv);
3622 		xa_unlock_irqrestore(xa, flags);
3623 	} else {
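		/* ring_id layout: [1:0] pipe, [3:2] me, [6:4] queue */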
3624 		me_id = (entry->ring_id & 0x0c) >> 2;
3625 		pipe_id = (entry->ring_id & 0x03) >> 0;
3626 		queue_id = (entry->ring_id & 0x70) >> 4;
3627 		xcc_id = gfx_v12_1_ih_to_xcc_inst(adev, entry->node_id);
3628 
3629 		if (xcc_id == -EINVAL)
3630 			return -EINVAL;
3631 
3632 		switch (me_id) {
3633 		case 0:
3634 			if (pipe_id == 0)
3635 				amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
3636 			else
3637 				amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
3638 			break;
3639 		case 1:
3640 		case 2:
3641 			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3642 				ring = &adev->gfx.compute_ring
3643 						[i +
3644 						 xcc_id * adev->gfx.num_compute_rings];
3645 				/* Per-queue interrupt is supported for MEC starting from VI.
3646 				 * The interrupt can only be enabled/disabled per pipe instead
3647 				 * of per queue.
3648 				 */
3649 				if ((ring->me == me_id) &&
3650 				    (ring->pipe == pipe_id) &&
3651 				    (ring->queue == queue_id))
3652 					amdgpu_fence_process(ring);
3653 			}
3654 			break;
3655 		}
3656 	}
3657 
3658 	return 0;
3659 }
3660 
3661 static int gfx_v12_1_set_priv_reg_fault_state(struct amdgpu_device *adev,
3662 					      struct amdgpu_irq_src *source,
3663 					      unsigned type,
3664 					      enum amdgpu_interrupt_state state)
3665 {
3666 	int i, num_xcc;
3667 
3668 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3669 	switch (state) {
3670 	case AMDGPU_IRQ_STATE_DISABLE:
3671 	case AMDGPU_IRQ_STATE_ENABLE:
3672 		for (i = 0; i < num_xcc; i++)
3673 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3674 					      PRIV_REG_INT_ENABLE,
3675 					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3676 		break;
3677 	default:
3678 		break;
3679 	}
3680 
3681 	return 0;
3682 }
3683 
3684 static int gfx_v12_1_set_priv_inst_fault_state(struct amdgpu_device *adev,
3685 					       struct amdgpu_irq_src *source,
3686 					       unsigned type,
3687 					       enum amdgpu_interrupt_state state)
3688 {
3689 	int i, num_xcc;
3690 
3691 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3692 	switch (state) {
3693 	case AMDGPU_IRQ_STATE_DISABLE:
3694 	case AMDGPU_IRQ_STATE_ENABLE:
3695 		for (i = 0; i < num_xcc; i++)
3696 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3697 				       PRIV_INSTR_INT_ENABLE,
3698 				       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3699 		break;
3700 	default:
3701 		break;
3702 	}
3703 
3704 	return 0;
3705 }
3706 
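/*
 * Fan a privileged register/instruction fault out to the ring(s) matching
 * the faulting me/pipe/queue decoded from the IV entry and kick their GPU
 * schedulers.
 */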
3707 static void gfx_v12_1_handle_priv_fault(struct amdgpu_device *adev,
3708 					struct amdgpu_iv_entry *entry)
3709 {
3710 	u8 me_id, pipe_id, queue_id;
3711 	struct amdgpu_ring *ring;
3712 	int i, xcc_id;
3713 
3714 	me_id = (entry->ring_id & 0x0c) >> 2;
3715 	pipe_id = (entry->ring_id & 0x03) >> 0;
3716 	queue_id = (entry->ring_id & 0x70) >> 4;
3717 	xcc_id = gfx_v12_1_ih_to_xcc_inst(adev, entry->node_id);
3718 
3719 	if (xcc_id == -EINVAL)
3720 		return;
3721 
3722 	switch (me_id) {
3723 	case 0:
3724 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3725 			ring = &adev->gfx.gfx_ring[i];
3726 			/* we only enable 1 gfx queue per pipe for now */
3727 			if (ring->me == me_id && ring->pipe == pipe_id)
3728 				drm_sched_fault(&ring->sched);
3729 		}
3730 		break;
3731 	case 1:
3732 	case 2:
3733 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3734 			ring = &adev->gfx.compute_ring
3735 					[i +
3736 					 xcc_id * adev->gfx.num_compute_rings];
3737 			if (ring->me == me_id && ring->pipe == pipe_id &&
3738 			    ring->queue == queue_id)
3739 				drm_sched_fault(&ring->sched);
3740 		}
3741 		break;
3742 	default:
3743 		BUG();
3744 		break;
3745 	}
3746 }
3747 
3748 static int gfx_v12_1_priv_reg_irq(struct amdgpu_device *adev,
3749 				  struct amdgpu_irq_src *source,
3750 				  struct amdgpu_iv_entry *entry)
3751 {
3752 	DRM_ERROR("Illegal register access in command stream\n");
3753 	gfx_v12_1_handle_priv_fault(adev, entry);
3754 	return 0;
3755 }
3756 
3757 static int gfx_v12_1_priv_inst_irq(struct amdgpu_device *adev,
3758 				   struct amdgpu_irq_src *source,
3759 				   struct amdgpu_iv_entry *entry)
3760 {
3761 	DRM_ERROR("Illegal instruction in command stream\n");
3762 	gfx_v12_1_handle_priv_fault(adev, entry);
3763 	return 0;
3764 }
3765 
3766 static void gfx_v12_1_emit_mem_sync(struct amdgpu_ring *ring)
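/*
 * Full pipeline cache maintenance: ACQUIRE_MEM over the whole address range
 * with a GCR request that invalidates the GLV/GLK/GLI L0 caches and writes
 * back and invalidates GL2.
 */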
3767 {
3768 	const unsigned int gcr_cntl =
3769 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
3770 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
3771 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
3772 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
3773 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1) |
3774 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_SCOPE(2);
3775 
3776 	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
3777 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
3778 	amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
3779 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
3780 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
3781 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
3782 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
3783 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
3784 	amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
3785 }
3786 
3787 static const struct amd_ip_funcs gfx_v12_1_ip_funcs = {
3788 	.name = "gfx_v12_1",
3789 	.early_init = gfx_v12_1_early_init,
3790 	.late_init = gfx_v12_1_late_init,
3791 	.sw_init = gfx_v12_1_sw_init,
3792 	.sw_fini = gfx_v12_1_sw_fini,
3793 	.hw_init = gfx_v12_1_hw_init,
3794 	.hw_fini = gfx_v12_1_hw_fini,
3795 	.suspend = gfx_v12_1_suspend,
3796 	.resume = gfx_v12_1_resume,
3797 	.is_idle = gfx_v12_1_is_idle,
3798 	.wait_for_idle = gfx_v12_1_wait_for_idle,
3799 	.set_clockgating_state = gfx_v12_1_set_clockgating_state,
3800 	.set_powergating_state = gfx_v12_1_set_powergating_state,
3801 	.get_clockgating_state = gfx_v12_1_get_clockgating_state,
3802 };
3803 
3804 static const struct amdgpu_ring_funcs gfx_v12_1_ring_funcs_compute = {
3805 	.type = AMDGPU_RING_TYPE_COMPUTE,
3806 	.align_mask = 0xff,
3807 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
3808 	.support_64bit_ptrs = true,
3809 	.get_rptr = gfx_v12_1_ring_get_rptr_compute,
3810 	.get_wptr = gfx_v12_1_ring_get_wptr_compute,
3811 	.set_wptr = gfx_v12_1_ring_set_wptr_compute,
3812 	.emit_frame_size =
3813 		7 + /* gfx_v12_1_ring_emit_pipeline_sync */
3814 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
3815 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
3816 		2 + /* gfx_v12_1_ring_emit_vm_flush */
3817 		8 + 8 + 8 + /* gfx_v12_1_ring_emit_fence x3 for user fence, vm fence */
3818 		8, /* gfx_v12_1_emit_mem_sync */
3819 	.emit_ib_size =	7, /* gfx_v12_1_ring_emit_ib_compute */
3820 	.emit_ib = gfx_v12_1_ring_emit_ib_compute,
3821 	.emit_fence = gfx_v12_1_ring_emit_fence,
3822 	.emit_pipeline_sync = gfx_v12_1_ring_emit_pipeline_sync,
3823 	.emit_vm_flush = gfx_v12_1_ring_emit_vm_flush,
3824 	.test_ring = gfx_v12_1_ring_test_ring,
3825 	.test_ib = gfx_v12_1_ring_test_ib,
3826 	.insert_nop = amdgpu_ring_insert_nop,
3827 	.pad_ib = amdgpu_ring_generic_pad_ib,
3828 	.emit_wreg = gfx_v12_1_ring_emit_wreg,
3829 	.emit_reg_wait = gfx_v12_1_ring_emit_reg_wait,
3830 	.emit_reg_write_reg_wait = gfx_v12_1_ring_emit_reg_write_reg_wait,
3831 	.emit_mem_sync = gfx_v12_1_emit_mem_sync,
3832 };
3833 
3834 static const struct amdgpu_ring_funcs gfx_v12_1_ring_funcs_kiq = {
3835 	.type = AMDGPU_RING_TYPE_KIQ,
3836 	.align_mask = 0xff,
3837 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
3838 	.support_64bit_ptrs = true,
3839 	.get_rptr = gfx_v12_1_ring_get_rptr_compute,
3840 	.get_wptr = gfx_v12_1_ring_get_wptr_compute,
3841 	.set_wptr = gfx_v12_1_ring_set_wptr_compute,
3842 	.emit_frame_size =
3843 		7 + /* gfx_v12_1_ring_emit_pipeline_sync */
3844 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
3845 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
3846 		2 + /* gfx_v12_1_ring_emit_vm_flush */
3847 		8 + 8 + 8, /* gfx_v12_1_ring_emit_fence_kiq x3 for user fence, vm fence */
3848 	.emit_ib_size =	7, /* gfx_v12_1_ring_emit_ib_compute */
3849 	.emit_ib = gfx_v12_1_ring_emit_ib_compute,
3850 	.emit_fence = gfx_v12_1_ring_emit_fence_kiq,
3851 	.test_ring = gfx_v12_1_ring_test_ring,
3852 	.test_ib = gfx_v12_1_ring_test_ib,
3853 	.insert_nop = amdgpu_ring_insert_nop,
3854 	.pad_ib = amdgpu_ring_generic_pad_ib,
3855 	.emit_rreg = gfx_v12_1_ring_emit_rreg,
3856 	.emit_wreg = gfx_v12_1_ring_emit_wreg,
3857 	.emit_reg_wait = gfx_v12_1_ring_emit_reg_wait,
3858 	.emit_reg_write_reg_wait = gfx_v12_1_ring_emit_reg_write_reg_wait,
3859 };
3860 
3861 static void gfx_v12_1_set_ring_funcs(struct amdgpu_device *adev)
3862 {
3863 	int i, j, num_xcc;
3864 
3865 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3866 	for (i = 0; i < num_xcc; i++) {
3867 		adev->gfx.kiq[i].ring.funcs = &gfx_v12_1_ring_funcs_kiq;
3868 
3869 		for (j = 0; j < adev->gfx.num_compute_rings; j++)
3870 			adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs =
3871 						&gfx_v12_1_ring_funcs_compute;
3872 	}
3873 }
3874 
3875 static const struct amdgpu_irq_src_funcs gfx_v12_1_eop_irq_funcs = {
3876 	.set = gfx_v12_1_set_eop_interrupt_state,
3877 	.process = gfx_v12_1_eop_irq,
3878 };
3879 
3880 static const struct amdgpu_irq_src_funcs gfx_v12_1_priv_reg_irq_funcs = {
3881 	.set = gfx_v12_1_set_priv_reg_fault_state,
3882 	.process = gfx_v12_1_priv_reg_irq,
3883 };
3884 
3885 static const struct amdgpu_irq_src_funcs gfx_v12_1_priv_inst_irq_funcs = {
3886 	.set = gfx_v12_1_set_priv_inst_fault_state,
3887 	.process = gfx_v12_1_priv_inst_irq,
3888 };
3889 
3890 static void gfx_v12_1_set_irq_funcs(struct amdgpu_device *adev)
3891 {
3892 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
3893 	adev->gfx.eop_irq.funcs = &gfx_v12_1_eop_irq_funcs;
3894 
3895 	adev->gfx.priv_reg_irq.num_types = 1;
3896 	adev->gfx.priv_reg_irq.funcs = &gfx_v12_1_priv_reg_irq_funcs;
3897 
3898 	adev->gfx.priv_inst_irq.num_types = 1;
3899 	adev->gfx.priv_inst_irq.funcs = &gfx_v12_1_priv_inst_irq_funcs;
3900 }
3901 
3902 static void gfx_v12_1_set_imu_funcs(struct amdgpu_device *adev)
3903 {
3904 	if (adev->flags & AMD_IS_APU)
3905 		adev->gfx.imu.mode = MISSION_MODE;
3906 	else
3907 		adev->gfx.imu.mode = DEBUG_MODE;
3908 	if (!amdgpu_sriov_vf(adev))
3909 		adev->gfx.imu.funcs = &gfx_v12_1_imu_funcs;
3910 }
3911 
3912 static void gfx_v12_1_set_rlc_funcs(struct amdgpu_device *adev)
3913 {
3914 	adev->gfx.rlc.funcs = &gfx_v12_1_rlc_funcs;
3915 }
3916 
3917 static void gfx_v12_1_set_mqd_funcs(struct amdgpu_device *adev)
3918 {
3919 	/* set compute eng mqd */
3920 	adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
3921 		sizeof(struct v12_1_compute_mqd);
3922 	adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
3923 		gfx_v12_1_compute_mqd_init;
3924 }
3925 
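/*
 * Program the user-requested inactive WGP bitmap for the currently selected
 * shader array; a zero bitmap leaves the register untouched.
 */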
3926 static void gfx_v12_1_set_user_cu_inactive_bitmap_per_sh(struct amdgpu_device *adev,
3927 							  u32 bitmap, int xcc_id)
3928 {
3929 	u32 data;
3930 
3931 	if (!bitmap)
3932 		return;
3933 
3934 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
3935 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
3936 
3937 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
3938 }
3939 
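/*
 * Combine the fused-off (CC) and user (GC_USER) inactive WGP masks and
 * return their complement, limited to max_cu_per_sh, i.e. the bitmap of CUs
 * actually active in the selected shader array.
 */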
3940 static u32 gfx_v12_1_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev,
3941 						 int xcc_id)
3942 {
3943 	u32 data, mask;
3944 
3945 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
3946 	data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);
3947 
3948 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
3949 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
3950 
3951 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
3952 
3953 	return (~data) & mask;
3954 }
3955 
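/*
 * Walk every XCC/SE/SA, apply the module-level CU disable masks and collect
 * the per-SA active CU bitmaps plus the total active CU count into cu_info.
 */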
3956 static int gfx_v12_1_get_cu_info(struct amdgpu_device *adev,
3957 				 struct amdgpu_cu_info *cu_info)
3958 {
3959 	int i, j, k, counter, xcc_id, active_cu_number = 0;
3960 	u32 mask, bitmap;
3961 	unsigned int disable_masks[2 * 2];
3962 
3963 	if (!adev || !cu_info)
3964 		return -EINVAL;
3965 
3966 	if (adev->gfx.config.max_shader_engines > 2 ||
3967 	    adev->gfx.config.max_sh_per_se > 2) {
3968 		dev_err(adev->dev,
3969 			"Max SE (%d) and Max SA per SE (%d) is greater than expected\n",
3970 			adev->gfx.config.max_shader_engines,
3971 			adev->gfx.config.max_sh_per_se);
3972 		return -EINVAL;
3973 	}
3974 
3975 	amdgpu_gfx_parse_disable_cu(adev, disable_masks,
3976 				    adev->gfx.config.max_shader_engines,
3977 				    adev->gfx.config.max_sh_per_se);
3978 
3979 	mutex_lock(&adev->grbm_idx_mutex);
3980 	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
3981 		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3982 			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3983 				bitmap = i * adev->gfx.config.max_sh_per_se + j;
3984 				if (!((gfx_v12_1_get_sa_active_bitmap(adev, xcc_id) >> bitmap) & 1))
3985 					continue;
3986 				mask = 1;
3987 				counter = 0;
3988 				gfx_v12_1_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
3989 				gfx_v12_1_set_user_cu_inactive_bitmap_per_sh(
3990 					adev,
3991 					disable_masks[i * adev->gfx.config.max_sh_per_se + j],
3992 					xcc_id);
3993 				bitmap = gfx_v12_1_get_cu_active_bitmap_per_sh(adev, xcc_id);
3994 
3995 				cu_info->bitmap[xcc_id][i][j] = bitmap;
3996 
3997 				for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
3998 					if (bitmap & mask)
3999 						counter++;
4000 
4001 					mask <<= 1;
4002 				}
4003 				active_cu_number += counter;
4004 			}
4005 		}
4006 		gfx_v12_1_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, xcc_id);
4007 	}
4008 	mutex_unlock(&adev->grbm_idx_mutex);
4009 
4010 	cu_info->number = active_cu_number;
4011 	cu_info->simd_per_cu = NUM_SIMD_PER_CU_GFX12_1;
4012 	cu_info->lds_size = 320;
4013 
4014 	return 0;
4015 }
4016 
4017 const struct amdgpu_ip_block_version gfx_v12_1_ip_block = {
4018 	.type = AMD_IP_BLOCK_TYPE_GFX,
4019 	.major = 12,
4020 	.minor = 1,
4021 	.rev = 0,
4022 	.funcs = &gfx_v12_1_ip_funcs,
4023 };
4024 
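/*
 * XCP (partition) hooks: resume initializes constants, RLC (bare metal only)
 * and CP on the XCC instances named in inst_mask; suspend tears the same
 * instances down again.
 */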
4025 static int gfx_v12_1_xcp_resume(void *handle, uint32_t inst_mask)
4026 {
4027 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4028 	uint32_t tmp_mask;
4029 	int i, r;
4030 
4031 	/* TODO : Initialize golden regs */
4032 	/* gfx_v12_1_init_golden_registers(adev); */
4033 
4034 	tmp_mask = inst_mask;
4035 	for_each_inst(i, tmp_mask)
4036 		gfx_v12_1_xcc_constants_init(adev, i);
4037 
4038 	if (!amdgpu_sriov_vf(adev)) {
4039 		tmp_mask = inst_mask;
4040 		for_each_inst(i, tmp_mask) {
4041 			r = gfx_v12_1_xcc_rlc_resume(adev, i);
4042 			if (r)
4043 				return r;
4044 		}
4045 	}
4046 
4047 	r = gfx_v12_1_xcc_cp_resume(adev, inst_mask);
4048 
4049 	return r;
4050 }
4051 
4052 static int gfx_v12_1_xcp_suspend(void *handle, uint32_t inst_mask)
4053 {
4054 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4055 	int i;
4056 
4057 	for_each_inst(i, inst_mask)
4058 		gfx_v12_1_xcc_fini(adev, i);
4059 
4060 	return 0;
4061 }
4062 
4063 struct amdgpu_xcp_ip_funcs gfx_v12_1_xcp_funcs = {
4064 	.suspend = &gfx_v12_1_xcp_suspend,
4065 	.resume = &gfx_v12_1_xcp_resume
4066 };
4067