xref: /linux/drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c (revision e56e3cff2a1bb29545ddbec562e76c0419363a40)
1 /*
2  * Copyright 2025 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/delay.h>
24 #include <linux/kernel.h>
25 #include <linux/firmware.h>
26 #include <linux/module.h>
27 #include <linux/pci.h>
28 #include "amdgpu.h"
29 #include "amdgpu_gfx.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_smu.h"
32 #include "amdgpu_atomfirmware.h"
33 #include "amdgpu_userq_fence.h"
34 #include "imu_v12_1.h"
35 #include "soc_v1_0.h"
36 #include "gfx_v12_1_pkt.h"
37 
38 #include "gc/gc_12_1_0_offset.h"
39 #include "gc/gc_12_1_0_sh_mask.h"
40 #include "soc24_enum.h"
41 #include "ivsrcid/gfx/irqsrcs_gfx_12_1_0.h"
42 
43 #include "soc15.h"
44 #include "clearstate_gfx12.h"
45 #include "v12_structs.h"
46 #include "gfx_v12_1.h"
47 #include "mes_v12_1.h"
48 
49 #define GFX12_MEC_HPD_SIZE	2048
50 #define NUM_SIMD_PER_CU_GFX12_1	4
51 
52 #define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
53 
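/*
 * Default (power-on) values for the CP HQD registers below; the compute
 * MQD initialization presumably seeds its fields from these.
 */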
54 #define regCP_HQD_EOP_CONTROL_DEFAULT                                             0x00000000
55 #define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT                                     0x00000000
56 #define regCP_MQD_CONTROL_DEFAULT                                                 0x00000100
57 #define regCP_HQD_PQ_CONTROL_DEFAULT                                              0x00308509
58 #define regCP_HQD_PQ_RPTR_DEFAULT                                                 0x00000000
59 #define regCP_HQD_PERSISTENT_STATE_DEFAULT                                        0x0ae06301
60 #define regCP_HQD_IB_CONTROL_DEFAULT                                              0x00100000
61 
62 MODULE_FIRMWARE("amdgpu/gc_12_1_0_mec.bin");
63 MODULE_FIRMWARE("amdgpu/gc_12_1_0_rlc.bin");
64 
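/*
 * Default SH_MEM_CONFIG: 64-bit address mode, unaligned access allowed,
 * initial instruction prefetch level 3.
 */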
65 #define SH_MEM_ALIGNMENT_MODE_UNALIGNED_GFX12_1_0	0x00000001
66 #define DEFAULT_SH_MEM_CONFIG \
67 	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
68 	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED_GFX12_1_0 << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
69 	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
70 
71 static void gfx_v12_1_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id);
72 static void gfx_v12_1_set_ring_funcs(struct amdgpu_device *adev);
73 static void gfx_v12_1_set_irq_funcs(struct amdgpu_device *adev);
74 static void gfx_v12_1_set_rlc_funcs(struct amdgpu_device *adev);
75 static void gfx_v12_1_set_mqd_funcs(struct amdgpu_device *adev);
76 static void gfx_v12_1_set_imu_funcs(struct amdgpu_device *adev);
77 static int gfx_v12_1_get_cu_info(struct amdgpu_device *adev,
78 				 struct amdgpu_cu_info *cu_info);
79 static uint64_t gfx_v12_1_get_gpu_clock_counter(struct amdgpu_device *adev);
80 static void gfx_v12_1_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
81 				       u32 sh_num, u32 instance, int xcc_id);
82 static void gfx_v12_1_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
83 				     uint32_t val);
84 static int gfx_v12_1_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
85 static void gfx_v12_1_ring_invalidate_tlbs(struct amdgpu_ring *ring,
86 					   uint16_t pasid, uint32_t flush_type,
87 					   bool all_hub, uint8_t dst_sel);
88 static void gfx_v12_1_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
89 static void gfx_v12_1_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
90 static void gfx_v12_1_update_perf_clk(struct amdgpu_device *adev,
91 				      bool enable);
92 static void gfx_v12_1_xcc_update_perf_clk(struct amdgpu_device *adev,
93 					 bool enable, int xcc_id);
94 static int gfx_v12_1_init_cp_compute_microcode_bo(struct amdgpu_device *adev);
95 
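/*
 * KIQ (Kernel Interface Queue) PM4 helpers: the KIQ is a privileged
 * compute queue the driver uses to set resources and to map, unmap and
 * query the hardware queues of the other rings.
 */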
96 static void gfx_v12_1_kiq_set_resources(struct amdgpu_ring *kiq_ring,
97 					uint64_t queue_mask)
98 {
99 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
100 	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
101 			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
102 	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
103 	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
104 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
105 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
106 	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
107 	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
108 }
109 
110 static void gfx_v12_1_kiq_map_queues(struct amdgpu_ring *kiq_ring,
111 				     struct amdgpu_ring *ring)
112 {
113 	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
114 	uint64_t wptr_addr = ring->wptr_gpu_addr;
115 	uint32_t me = 0, eng_sel = 0;
116 
117 	switch (ring->funcs->type) {
118 	case AMDGPU_RING_TYPE_COMPUTE:
119 		me = 1;
120 		eng_sel = 0;
121 		break;
122 	case AMDGPU_RING_TYPE_MES:
123 		me = 2;
124 		eng_sel = 5;
125 		break;
126 	default:
127 		WARN_ON(1);
128 	}
129 
130 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
131 	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
132 	amdgpu_ring_write(kiq_ring,
133 			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
134 			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
135 			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
136 			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
137 			  PACKET3_MAP_QUEUES_ME(me) |
138 			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
139 			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
140 			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
141 			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
142 	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
143 	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
144 	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
145 	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
146 	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
147 }
148 
149 static void gfx_v12_1_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
150 				       struct amdgpu_ring *ring,
151 				       enum amdgpu_unmap_queues_action action,
152 				       u64 gpu_addr, u64 seq)
153 {
154 	struct amdgpu_device *adev = kiq_ring->adev;
155 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
156 
157 	if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
158 		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr,
159 					      seq, kiq_ring->xcc_id);
160 		return;
161 	}
162 
163 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
164 	amdgpu_ring_write(kiq_ring, /* action, queue_sel, eng_sel, num_queues */
165 			  PACKET3_UNMAP_QUEUES_ACTION(action) |
166 			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
167 			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
168 			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
169 	amdgpu_ring_write(kiq_ring,
170 		  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
171 
172 	if (action == PREEMPT_QUEUES_NO_UNMAP) {
173 		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
174 		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
175 		amdgpu_ring_write(kiq_ring, seq);
176 	} else {
177 		amdgpu_ring_write(kiq_ring, 0);
178 		amdgpu_ring_write(kiq_ring, 0);
179 		amdgpu_ring_write(kiq_ring, 0);
180 	}
181 }
182 
183 static void gfx_v12_1_kiq_query_status(struct amdgpu_ring *kiq_ring,
184 				       struct amdgpu_ring *ring,
185 				       u64 addr, u64 seq)
186 {
187 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
188 
189 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
190 	amdgpu_ring_write(kiq_ring,
191 			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
192 			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
193 			  PACKET3_QUERY_STATUS_COMMAND(2));
194 	amdgpu_ring_write(kiq_ring, /* doorbell offset, eng_sel */
195 			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
196 			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
197 	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
198 	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
199 	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
200 	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
201 }
202 
203 static void gfx_v12_1_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
204 					  uint16_t pasid,
205 					  uint32_t flush_type,
206 					  bool all_hub)
207 {
208 	gfx_v12_1_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
209 }
210 
211 static const struct kiq_pm4_funcs gfx_v12_1_kiq_pm4_funcs = {
212 	.kiq_set_resources = gfx_v12_1_kiq_set_resources,
213 	.kiq_map_queues = gfx_v12_1_kiq_map_queues,
214 	.kiq_unmap_queues = gfx_v12_1_kiq_unmap_queues,
215 	.kiq_query_status = gfx_v12_1_kiq_query_status,
216 	.kiq_invalidate_tlbs = gfx_v12_1_kiq_invalidate_tlbs,
217 	.set_resources_size = 8,
218 	.map_queues_size = 7,
219 	.unmap_queues_size = 6,
220 	.query_status_size = 7,
221 	.invalidate_tlbs_size = 2,
222 };
223 
224 static void gfx_v12_1_set_kiq_pm4_funcs(struct amdgpu_device *adev)
225 {
226 	int i, num_xcc;
227 
228 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
229 	for (i = 0; i < num_xcc; i++)
230 		adev->gfx.kiq[i].pmf = &gfx_v12_1_kiq_pm4_funcs;
231 }
232 
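/*
 * Emit a WAIT_REG_MEM packet that polls either a register (mem_space == 0)
 * or a memory location (mem_space == 1) until (value & mask) == ref.
 */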
233 static void gfx_v12_1_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
234 				   int mem_space, int opt, uint32_t addr0,
235 				   uint32_t addr1, uint32_t ref,
236 				   uint32_t mask, uint32_t inv)
237 {
238 	if (mem_space == 0) {
239 		addr0 = soc_v1_0_normalize_xcc_reg_offset(addr0);
240 		addr1 = soc_v1_0_normalize_xcc_reg_offset(addr1);
241 	}
242 
243 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
244 	amdgpu_ring_write(ring,
245 			  /* memory (1) or register (0) */
246 			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
247 			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
248 			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
249 			   WAIT_REG_MEM_ENGINE(eng_sel)));
250 
251 	if (mem_space)
252 		BUG_ON(addr0 & 0x3); /* Dword align */
253 	amdgpu_ring_write(ring, addr0);
254 	amdgpu_ring_write(ring, addr1);
255 	amdgpu_ring_write(ring, ref);
256 	amdgpu_ring_write(ring, mask);
257 	amdgpu_ring_write(ring, inv); /* poll interval */
258 }
259 
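/*
 * Basic ring test: scribble 0xCAFEDEAD into SCRATCH_REG0, ask the ring
 * to write 0xDEADBEEF over it, then poll until the value flips.
 */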
260 static int gfx_v12_1_ring_test_ring(struct amdgpu_ring *ring)
261 {
262 	struct amdgpu_device *adev = ring->adev;
263 	uint32_t scratch_reg0_offset, xcc_offset;
264 	uint32_t tmp = 0;
265 	unsigned i;
266 	int r;
267 
268 	/* Use the XCC-local register offset in the packet */
269 	xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
270 	scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
271 	WREG32(scratch_reg0_offset, 0xCAFEDEAD);
272 	tmp = RREG32(scratch_reg0_offset);
273 
274 	r = amdgpu_ring_alloc(ring, 5);
275 	if (r) {
276 		dev_err(adev->dev,
277 			"amdgpu: cp failed to lock ring %d (%d).\n",
278 			ring->idx, r);
279 		return r;
280 	}
281 
282 	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
283 		gfx_v12_1_ring_emit_wreg(ring, xcc_offset, 0xDEADBEEF);
284 	} else {
285 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
286 		amdgpu_ring_write(ring, xcc_offset -
287 				  PACKET3_SET_UCONFIG_REG_START);
288 		amdgpu_ring_write(ring, 0xDEADBEEF);
289 	}
290 	amdgpu_ring_commit(ring);
291 
292 	for (i = 0; i < adev->usec_timeout; i++) {
293 		tmp = RREG32(scratch_reg0_offset);
294 		if (tmp == 0xDEADBEEF)
295 			break;
296 		if (amdgpu_emu_mode == 1)
297 			msleep(1);
298 		else
299 			udelay(1);
300 	}
301 
302 	if (i >= adev->usec_timeout)
303 		r = -ETIMEDOUT;
304 	return r;
305 }
306 
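/*
 * IB test: submit a small indirect buffer that WRITE_DATAs 0xDEADBEEF
 * into a writeback slot and verify that the value lands.
 */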
307 static int gfx_v12_1_ring_test_ib(struct amdgpu_ring *ring, long timeout)
308 {
309 	struct amdgpu_device *adev = ring->adev;
310 	struct amdgpu_ib ib;
311 	struct dma_fence *f = NULL;
312 	unsigned index;
313 	uint64_t gpu_addr;
314 	volatile uint32_t *cpu_ptr;
315 	long r;
316 
317 	/* MES KIQ fw doesn't support indirect buffers for now */
318 	if (adev->enable_mes_kiq &&
319 	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
320 		return 0;
321 
322 	memset(&ib, 0, sizeof(ib));
323 
324 	r = amdgpu_device_wb_get(adev, &index);
325 	if (r)
326 		return r;
327 
328 	gpu_addr = adev->wb.gpu_addr + (index * 4);
329 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
330 	cpu_ptr = &adev->wb.wb[index];
331 
332 	r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
333 	if (r) {
334 		dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r);
335 		goto err1;
336 	}
337 
338 	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
339 	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
340 	ib.ptr[2] = lower_32_bits(gpu_addr);
341 	ib.ptr[3] = upper_32_bits(gpu_addr);
342 	ib.ptr[4] = 0xDEADBEEF;
343 	ib.length_dw = 5;
344 
345 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
346 	if (r)
347 		goto err2;
348 
349 	r = dma_fence_wait_timeout(f, false, timeout);
350 	if (r == 0) {
351 		r = -ETIMEDOUT;
352 		goto err2;
353 	} else if (r < 0) {
354 		goto err2;
355 	}
356 
357 	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
358 		r = 0;
359 	else
360 		r = -EINVAL;
361 err2:
362 	amdgpu_ib_free(&ib, NULL);
363 	dma_fence_put(f);
364 err1:
365 	amdgpu_device_wb_free(adev, index);
366 	return r;
367 }
368 
369 static void gfx_v12_1_free_microcode(struct amdgpu_device *adev)
370 {
371 	amdgpu_ucode_release(&adev->gfx.rlc_fw);
372 	amdgpu_ucode_release(&adev->gfx.mec_fw);
373 
374 	kfree(adev->gfx.rlc.register_list_format);
375 }
376 
377 static int gfx_v12_1_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
378 {
379 	const struct psp_firmware_header_v1_0 *toc_hdr;
380 	int err = 0;
381 
382 	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
383 				   AMDGPU_UCODE_REQUIRED,
384 				   "amdgpu/%s_toc.bin", ucode_prefix);
385 	if (err)
386 		goto out;
387 
388 	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
389 	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
390 	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
391 	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
392 	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
393 			le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
394 	return 0;
395 out:
396 	amdgpu_ucode_release(&adev->psp.toc_fw);
397 	return err;
398 }
399 
400 static int gfx_v12_1_init_microcode(struct amdgpu_device *adev)
401 {
402 	char ucode_prefix[15];
403 	int err;
404 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
405 	uint16_t version_major;
406 	uint16_t version_minor;
407 
408 	DRM_DEBUG("\n");
409 
410 	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
411 
412 	if (!amdgpu_sriov_vf(adev)) {
413 		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
414 					   AMDGPU_UCODE_REQUIRED,
415 					   "amdgpu/%s_rlc.bin", ucode_prefix);
416 		if (err)
417 			goto out;
418 		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
419 		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
420 		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
421 		err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
422 		if (err)
423 			goto out;
424 	}
425 
426 	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
427 				   AMDGPU_UCODE_REQUIRED,
428 				   "amdgpu/%s_mec.bin", ucode_prefix);
429 	if (err)
430 		goto out;
431 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
432 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
433 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
434 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK);
435 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK);
436 
437 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
438 		err = gfx_v12_1_init_toc_microcode(adev, ucode_prefix);
439 
440 	/* only one MEC for gfx 12 */
441 	adev->gfx.mec2_fw = NULL;
442 
443 	if (adev->gfx.imu.funcs) {
444 		if (adev->gfx.imu.funcs->init_microcode) {
445 			err = adev->gfx.imu.funcs->init_microcode(adev);
446 			if (err)
447 				dev_err(adev->dev, "Failed to load imu firmware!\n");
448 		}
449 	}
450 
451 out:
452 	if (err) {
453 		amdgpu_ucode_release(&adev->gfx.rlc_fw);
454 		amdgpu_ucode_release(&adev->gfx.mec_fw);
455 	}
456 
457 	return err;
458 }
459 
460 static u32 gfx_v12_1_get_csb_size(struct amdgpu_device *adev)
461 {
462 	u32 count = 0;
463 	const struct cs_section_def *sect = NULL;
464 	const struct cs_extent_def *ext = NULL;
465 
466 	count += 1;
467 
468 	for (sect = gfx12_cs_data; sect->section != NULL; ++sect) {
469 		if (sect->id == SECT_CONTEXT) {
470 			for (ext = sect->section; ext->extent != NULL; ++ext)
471 				count += 2 + ext->reg_count;
472 		} else
473 			return 0;
474 	}
475 
476 	return count;
477 }
478 
479 static void gfx_v12_1_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
480 {
481 	u32 count = 0, clustercount = 0, i;
482 	const struct cs_section_def *sect = NULL;
483 	const struct cs_extent_def *ext = NULL;
484 
485 	if (adev->gfx.rlc.cs_data == NULL)
486 		return;
487 	if (buffer == NULL)
488 		return;
489 
490 	count += 1;
491 
492 	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
493 		if (sect->id == SECT_CONTEXT) {
494 			for (ext = sect->section; ext->extent != NULL; ++ext) {
495 				clustercount++;
496 				buffer[count++] = ext->reg_count;
497 				buffer[count++] = ext->reg_index;
498 
499 				for (i = 0; i < ext->reg_count; i++)
500 					buffer[count++] = cpu_to_le32(ext->extent[i]);
501 			}
502 		} else
503 			return;
504 	}
505 
506 	buffer[0] = clustercount;
507 }
508 
509 static void gfx_v12_1_rlc_fini(struct amdgpu_device *adev)
510 {
511 	/* clear state block */
512 	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
513 			&adev->gfx.rlc.clear_state_gpu_addr,
514 			(void **)&adev->gfx.rlc.cs_ptr);
515 
516 	/* jump table block */
517 	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
518 			&adev->gfx.rlc.cp_table_gpu_addr,
519 			(void **)&adev->gfx.rlc.cp_table_ptr);
520 }
521 
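/*
 * Record, per XCC, the register offsets used by the RLCG register
 * access path (GRBM control/index plus the RLC_VFI registers).
 */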
522 static void gfx_v12_1_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
523 {
524 	int xcc_id, num_xcc;
525 	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
526 
527 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
528 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
529 		reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
530 
531 		reg_access_ctrl->grbm_cntl =
532 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
533 		reg_access_ctrl->grbm_idx =
534 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
535 
536 		reg_access_ctrl->vfi_cmd =
537 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_VFI_CMD);
538 		reg_access_ctrl->vfi_stat =
539 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_VFI_STAT);
540 		reg_access_ctrl->vfi_addr =
541 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_VFI_ADDR);
542 		reg_access_ctrl->vfi_data =
543 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_VFI_DATA);
544 		reg_access_ctrl->vfi_grbm_cntl =
545 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_VFI_GRBM_GFX_CNTL);
546 		reg_access_ctrl->vfi_grbm_idx =
547 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_VFI_GRBM_GFX_INDEX);
548 		reg_access_ctrl->vfi_grbm_cntl_data = 0;
549 		reg_access_ctrl->vfi_grbm_idx_data = 0;
550 	}
551 	adev->gfx.rlc.rlcg_reg_access_supported = true;
552 }
553 
554 static int gfx_v12_1_rlc_init(struct amdgpu_device *adev)
555 {
556 	const struct cs_section_def *cs_data;
557 	int r, i, num_xcc;
558 
559 	adev->gfx.rlc.cs_data = gfx12_cs_data;
560 
561 	cs_data = adev->gfx.rlc.cs_data;
562 
563 	if (cs_data) {
564 		/* init clear state block */
565 		r = amdgpu_gfx_rlc_init_csb(adev);
566 		if (r)
567 			return r;
568 	}
569 
570 	/* init spm vmid with 0xf */
571 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
572 	for (i = 0; i < num_xcc; i++) {
573 		if (adev->gfx.rlc.funcs->update_spm_vmid)
574 			adev->gfx.rlc.funcs->update_spm_vmid(adev, i, NULL, 0xf);
575 	}
576 
577 	return 0;
578 }
579 
580 static void gfx_v12_1_mec_fini(struct amdgpu_device *adev)
581 {
582 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
583 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
584 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
585 }
586 
587 static int gfx_v12_1_mec_init(struct amdgpu_device *adev)
588 {
589 	int r, i, num_xcc;
590 	u32 *hpd;
591 	size_t mec_hpd_size;
592 
593 	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
594 
595 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
596 	for (i = 0; i < num_xcc; i++)
597 		bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
598 			    AMDGPU_MAX_COMPUTE_QUEUES);
599 
600 	/* take ownership of the relevant compute queues */
601 	amdgpu_gfx_compute_queue_acquire(adev);
602 	mec_hpd_size = adev->gfx.num_compute_rings *
603 		       GFX12_MEC_HPD_SIZE * num_xcc;
604 
605 	if (mec_hpd_size) {
606 		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
607 					      AMDGPU_GEM_DOMAIN_GTT,
608 					      &adev->gfx.mec.hpd_eop_obj,
609 					      &adev->gfx.mec.hpd_eop_gpu_addr,
610 					      (void **)&hpd);
611 		if (r) {
612 			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
613 			gfx_v12_1_mec_fini(adev);
614 			return r;
615 		}
616 
617 		memset(hpd, 0, mec_hpd_size);
618 
619 		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
620 		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
621 	}
622 
623 	return 0;
624 }
625 
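/*
 * Wave debug helpers: read per-wave state through the indexed
 * SQ_IND_INDEX/SQ_IND_DATA register pair.
 */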
626 static uint32_t wave_read_ind(struct amdgpu_device *adev,
627 			      uint32_t xcc_id, uint32_t wave,
628 			      uint32_t address)
629 {
630 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
631 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
632 		(address << SQ_IND_INDEX__INDEX__SHIFT));
633 	return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
634 }
635 
636 static void wave_read_regs(struct amdgpu_device *adev,
637 			   uint32_t xcc_id, uint32_t wave,
638 			   uint32_t thread, uint32_t regno,
639 			   uint32_t num, uint32_t *out)
640 {
641 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
642 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
643 		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
644 		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
645 		(SQ_IND_INDEX__AUTO_INCR_MASK));
646 	while (num--)
647 		*(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
648 }
649 
650 static void gfx_v12_1_read_wave_data(struct amdgpu_device *adev,
651 				     uint32_t xcc_id,
652 				     uint32_t simd, uint32_t wave,
653 				     uint32_t *dst, int *no_fields)
654 {
655 	/* in gfx12 the SIMD_ID is specified as part of the INSTANCE
656 	 * field when performing a select_se_sh so it should be
657 	 * zero here */
658 	WARN_ON(simd != 0);
659 
660 	/* type 4 wave data */
661 	dst[(*no_fields)++] = 4;
662 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_STATUS);
663 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_PC_LO);
664 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_PC_HI);
665 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXEC_LO);
666 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXEC_HI);
667 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_HW_ID1);
668 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_HW_ID2);
669 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_GPR_ALLOC);
670 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_LDS_ALLOC);
671 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_IB_STS);
672 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_IB_STS2);
673 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_IB_DBG1);
674 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_M0);
675 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_MODE);
676 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_STATE_PRIV);
677 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXCP_FLAG_PRIV);
678 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXCP_FLAG_USER);
679 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_TRAP_CTRL);
680 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_ACTIVE);
681 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_VALID_AND_IDLE);
682 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_DVGPR_ALLOC_LO);
683 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_DVGPR_ALLOC_HI);
684 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_SCHED_MODE);
685 }
686 
687 static void gfx_v12_1_read_wave_sgprs(struct amdgpu_device *adev,
688 				      uint32_t xcc_id, uint32_t simd,
689 				      uint32_t wave, uint32_t start,
690 				      uint32_t size, uint32_t *dst)
691 {
692 	WARN_ON(simd != 0);
693 
694 	wave_read_regs(adev, xcc_id, wave, 0,
695 		       start + SQIND_WAVE_SGPRS_OFFSET,
696 		       size, dst);
697 }
698 
699 static void gfx_v12_1_read_wave_vgprs(struct amdgpu_device *adev,
700 				      uint32_t xcc_id, uint32_t simd,
701 				      uint32_t wave, uint32_t thread,
702 				      uint32_t start, uint32_t size,
703 				      uint32_t *dst)
704 {
705 	wave_read_regs(adev, xcc_id, wave, thread,
706 		       start + SQIND_WAVE_VGPRS_OFFSET,
707 		       size, dst);
708 }
709 
710 static void gfx_v12_1_select_me_pipe_q(struct amdgpu_device *adev,
711 				       u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
712 {
713 	soc_v1_0_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
714 }
715 
716 static int gfx_v12_1_get_xccs_per_xcp(struct amdgpu_device *adev)
717 {
718 	/* Fill this in when the interface is ready */
719 	return 1;
720 }
721 
722 static int gfx_v12_1_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
723 {
724 	int logic_xcc;
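	/* IH nodes 2..5 within each group of eight appear to map to the
	 * four XCCs of a die; recover the physical XCC id from the node */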
725 	int xcc = (ih_node & 0x7) - 2 + (ih_node >> 3) * 4;
726 
727 	for (logic_xcc = 0; logic_xcc < NUM_XCC(adev->gfx.xcc_mask); logic_xcc++) {
728 		if (xcc == GET_INST(GC, logic_xcc))
729 			return logic_xcc;
730 	}
731 
732 	dev_err(adev->dev, "Couldn't find xcc mapping from IH node\n");
733 	return -EINVAL;
734 }
735 
736 static const struct amdgpu_gfx_funcs gfx_v12_1_gfx_funcs = {
737 	.get_gpu_clock_counter = &gfx_v12_1_get_gpu_clock_counter,
738 	.select_se_sh = &gfx_v12_1_xcc_select_se_sh,
739 	.read_wave_data = &gfx_v12_1_read_wave_data,
740 	.read_wave_sgprs = &gfx_v12_1_read_wave_sgprs,
741 	.read_wave_vgprs = &gfx_v12_1_read_wave_vgprs,
742 	.select_me_pipe_q = &gfx_v12_1_select_me_pipe_q,
743 	.update_perfmon_mgcg = &gfx_v12_1_update_perf_clk,
744 	.get_xccs_per_xcp = &gfx_v12_1_get_xccs_per_xcp,
745 	.ih_node_to_logical_xcc = &gfx_v12_1_ih_to_xcc_inst,
746 };
747 
748 static int gfx_v12_1_gpu_early_init(struct amdgpu_device *adev)
749 {
750 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
751 	case IP_VERSION(12, 1, 0):
752 		adev->gfx.config.max_hw_contexts = 8;
753 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
754 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
755 		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
756 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
757 		break;
758 	default:
759 		BUG();
760 		break;
761 	}
762 
763 	return 0;
764 }
765 
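/*
 * Set up one compute ring: position it on its XCC/MEC/pipe/queue,
 * assign a doorbell and an EOP buffer slice, and hook up the EOP irq.
 */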
766 static int gfx_v12_1_compute_ring_init(struct amdgpu_device *adev, int ring_id,
767 				       int xcc_id, int mec, int pipe, int queue)
768 {
769 	int r;
770 	unsigned irq_type;
771 	struct amdgpu_ring *ring;
772 	unsigned int hw_prio;
773 	uint32_t xcc_doorbell_start;
774 
775 	ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
776 				       ring_id];
777 
778 	/* mec0 is me1 */
779 	ring->xcc_id = xcc_id;
780 	ring->me = mec + 1;
781 	ring->pipe = pipe;
782 	ring->queue = queue;
783 
784 	ring->ring_obj = NULL;
785 	ring->use_doorbell = true;
786 	xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
787 			     xcc_id * adev->doorbell_index.xcc_doorbell_range;
788 	ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
789 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
790 			     (ring_id + xcc_id * adev->gfx.num_compute_rings) *
791 			     GFX12_MEC_HPD_SIZE;
792 	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
793 	sprintf(ring->name, "comp_%d.%d.%d.%d",
794 			ring->xcc_id, ring->me, ring->pipe, ring->queue);
795 
796 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
797 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
798 		+ ring->pipe;
799 	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
800 			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
801 	/* type-2 packets are deprecated on MEC, use type-3 instead */
802 	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
803 			     hw_prio, NULL);
804 	if (r)
805 		return r;
806 
807 	return 0;
808 }
809 
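/*
 * Firmware layout table for RLC backdoor autoload, filled in from the
 * PSP TOC: per-firmware offset, size and instance count inside the
 * autoload buffer.
 */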
810 static struct {
811 	SOC24_FIRMWARE_ID	id;
812 	unsigned int		offset;
813 	unsigned int		size;
814 	unsigned int		size_x16;
815 	unsigned int		num_inst;
816 } rlc_autoload_info[SOC24_FIRMWARE_ID_MAX];
817 
818 #define RLC_TOC_OFFSET_DWUNIT   8
819 #define RLC_SIZE_MULTIPLE       1024
820 #define RLC_TOC_UMF_SIZE_inM	23ULL
821 #define RLC_TOC_FORMAT_API	165ULL
822 
823 #define RLC_NUM_INS_CODE0   1
824 #define RLC_NUM_INS_CODE1   8
825 #define RLC_NUM_INS_CODE2   2
826 #define RLC_NUM_INS_CODE3   16
827 
828 static void gfx_v12_1_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
829 {
830 	RLC_TABLE_OF_CONTENT_V2 *ucode = rlc_toc;
831 
832 	while (ucode && (ucode->id > SOC24_FIRMWARE_ID_INVALID)) {
833 		rlc_autoload_info[ucode->id].id = ucode->id;
834 		rlc_autoload_info[ucode->id].offset =
835 			ucode->offset * RLC_TOC_OFFSET_DWUNIT * 4;
836 		rlc_autoload_info[ucode->id].size =
837 			ucode->size_x16 ? ucode->size * RLC_SIZE_MULTIPLE * 4 :
838 					  ucode->size * 4;
839 		switch (ucode->vfflr_image_code) {
840 		case 0:
841 			rlc_autoload_info[ucode->id].num_inst =
842 				RLC_NUM_INS_CODE0;
843 			break;
844 		case 1:
845 			rlc_autoload_info[ucode->id].num_inst =
846 				RLC_NUM_INS_CODE1;
847 			break;
848 		case 2:
849 			rlc_autoload_info[ucode->id].num_inst =
850 				RLC_NUM_INS_CODE2;
851 			break;
852 		case 3:
853 			rlc_autoload_info[ucode->id].num_inst =
854 				RLC_NUM_INS_CODE3;
855 			break;
856 		default:
857 			dev_err(adev->dev,
858 				"Invalid instance number detected\n");
859 			break;
860 		}
861 		ucode++;
862 	}
863 }
864 
865 static uint32_t gfx_v12_1_calc_toc_total_size(struct amdgpu_device *adev)
866 {
867 	uint32_t total_size = 0;
868 	SOC24_FIRMWARE_ID id;
869 
870 	gfx_v12_1_parse_rlc_toc(adev, adev->psp.toc.start_addr);
871 
872 	for (id = SOC24_FIRMWARE_ID_RLC_G_UCODE; id < SOC24_FIRMWARE_ID_MAX; id++)
873 		total_size += rlc_autoload_info[id].size;
874 
875 	/* the summed sizes may undershoot when offsets in the RLC TOC are aligned */
876 	if (total_size < rlc_autoload_info[SOC24_FIRMWARE_ID_MAX - 1].offset)
877 		total_size = rlc_autoload_info[SOC24_FIRMWARE_ID_MAX - 1].offset +
878 			rlc_autoload_info[SOC24_FIRMWARE_ID_MAX - 1].size;
879 	if (total_size < (RLC_TOC_UMF_SIZE_inM << 20))
880 		total_size = RLC_TOC_UMF_SIZE_inM << 20;
881 
882 	return total_size;
883 }
884 
885 static int gfx_v12_1_rlc_autoload_buffer_init(struct amdgpu_device *adev)
886 {
887 	int r;
888 	uint32_t total_size;
889 
890 	total_size = gfx_v12_1_calc_toc_total_size(adev);
891 
892 	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
893 				      AMDGPU_GEM_DOMAIN_VRAM,
894 				      &adev->gfx.rlc.rlc_autoload_bo,
895 				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
896 				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
897 
898 	if (r) {
899 		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
900 		return r;
901 	}
902 
903 	return 0;
904 }
905 
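/*
 * Copy one firmware image into the autoload buffer at its TOC offset,
 * replicating it per instance (gated by the xcc_mask) and zero-padding
 * each instance slot.
 */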
906 static void gfx_v12_1_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
907 						       SOC24_FIRMWARE_ID id,
908 						       const void *fw_data,
909 						       uint32_t fw_size)
910 {
911 	uint32_t toc_offset;
912 	uint32_t toc_fw_size, toc_fw_inst_size;
913 	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;
914 	int i, num_inst;
915 
916 	if (id <= SOC24_FIRMWARE_ID_INVALID || id >= SOC24_FIRMWARE_ID_MAX)
917 		return;
918 
919 	toc_offset = rlc_autoload_info[id].offset;
920 	toc_fw_size = rlc_autoload_info[id].size;
921 	num_inst = rlc_autoload_info[id].num_inst;
922 	toc_fw_inst_size = toc_fw_size / num_inst;
923 
924 	if (fw_size == 0)
925 		fw_size = toc_fw_inst_size;
926 
927 	if (fw_size > toc_fw_inst_size)
928 		fw_size = toc_fw_inst_size;
929 
930 	for (i = 0; i < num_inst; i++) {
931 		if ((num_inst == RLC_NUM_INS_CODE0) ||
932 		    ((1 << (i / 2)) & adev->gfx.xcc_mask)) {
933 			memcpy(ptr + toc_offset + i * toc_fw_inst_size, fw_data, fw_size);
934 
935 			if (fw_size < toc_fw_inst_size)
936 				memset(ptr + toc_offset + fw_size + i * toc_fw_inst_size,
937 				       0, toc_fw_inst_size - fw_size);
938 		}
939 	}
940 }
941 
942 static void
943 gfx_v12_1_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev)
944 {
945 	void *data;
946 	uint32_t size;
947 	uint32_t *toc_ptr;
948 
949 	data = adev->psp.toc.start_addr;
950 	size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_TOC].size;
951 
952 	toc_ptr = (uint32_t *)data + size / 4 - 2;
953 	*toc_ptr = (RLC_TOC_FORMAT_API << 24) | 0x1;
954 
955 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_TOC,
956 						   data, size);
957 }
958 
959 static void
960 gfx_v12_1_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev)
961 {
962 	const __le32 *fw_data;
963 	uint32_t fw_size;
964 	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
965 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
966 	const struct rlc_firmware_header_v2_1 *rlcv21_hdr;
967 	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
968 	uint16_t version_major, version_minor;
969 
970 	/* mec ucode */
971 	cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
972 		adev->gfx.mec_fw->data;
973 	/* instruction */
974 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
975 		le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
976 	fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
977 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC,
978 						   fw_data, fw_size);
979 	/* data */
980 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
981 		le32_to_cpu(cpv2_hdr->data_offset_bytes));
982 	fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
983 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P0_STACK,
984 						   fw_data, fw_size);
985 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P1_STACK,
986 						   fw_data, fw_size);
987 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P2_STACK,
988 						   fw_data, fw_size);
989 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P3_STACK,
990 						   fw_data, fw_size);
991 
992 	/* rlc ucode */
993 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
994 		adev->gfx.rlc_fw->data;
995 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
996 			le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
997 	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
998 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_G_UCODE,
999 						   fw_data, fw_size);
1000 
1001 	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1002 	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1003 	if (version_major == 2) {
1004 		if (version_minor >= 1) {
1005 			rlcv21_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
1006 
1007 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1008 					le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_offset_bytes));
1009 			fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_size_bytes);
1010 			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLCG_SCRATCH,
1011 						   fw_data, fw_size);
1012 
1013 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1014 					le32_to_cpu(rlcv21_hdr->save_restore_list_srm_offset_bytes));
1015 			fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_srm_size_bytes);
1016 			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_SRM_ARAM,
1017 						   fw_data, fw_size);
1018 		}
1019 		if (version_minor >= 2) {
1020 			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
1021 
1022 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1023 					le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
1024 			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
1025 			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_UCODE,
1026 						   fw_data, fw_size);
1027 
1028 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1029 					le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
1030 			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
1031 			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_DRAM_BOOT,
1032 						   fw_data, fw_size);
1033 		}
1034 	}
1035 }
1036 
1037 static void
1038 gfx_v12_1_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev)
1039 {
1040 	const __le32 *fw_data;
1041 	uint32_t fw_size;
1042 	const struct sdma_firmware_header_v3_0 *sdma_hdr;
1043 
1044 	if (adev->sdma.instance[0].fw) {
1045 		sdma_hdr = (const struct sdma_firmware_header_v3_0 *)
1046 			adev->sdma.instance[0].fw->data;
1047 		fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
1048 				le32_to_cpu(sdma_hdr->ucode_offset_bytes));
1049 		fw_size = le32_to_cpu(sdma_hdr->ucode_size_bytes);
1050 
1051 		gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_SDMA_UCODE_TH0,
1052 							   fw_data, fw_size);
1053 	}
1054 }
1055 
1056 static void
1057 gfx_v12_1_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev)
1058 {
1059 	const __le32 *fw_data;
1060 	unsigned fw_size;
1061 	const struct mes_firmware_header_v1_0 *mes_hdr;
1062 	int pipe, ucode_id, data_id;
1063 
1064 	for (pipe = 0; pipe < 2; pipe++) {
1065 		if (pipe == 0) {
1066 			ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P0;
1067 			data_id  = SOC24_FIRMWARE_ID_RS64_MES_P0_STACK;
1068 		} else {
1069 			ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P1;
1070 			data_id  = SOC24_FIRMWARE_ID_RS64_MES_P1_STACK;
1071 		}
1072 
1073 		mes_hdr = (const struct mes_firmware_header_v1_0 *)
1074 			adev->mes.fw[pipe]->data;
1075 
1076 		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
1077 				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
1078 		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
1079 
1080 		gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, ucode_id, fw_data, fw_size);
1081 
1082 		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
1083 				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
1084 		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
1085 
1086 		gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, data_id, fw_data, fw_size);
1087 	}
1088 }
1089 
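/*
 * RLC backdoor autoload: stage all firmwares in the autoload buffer,
 * point each XCC's IMU bootloader registers at the RLC_G image, load
 * the IMU, then unhalt the RLC so it pulls in the rest.
 */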
1090 static int gfx_v12_1_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
1091 {
1092 	uint32_t rlc_g_offset, rlc_g_size;
1093 	uint64_t gpu_addr;
1094 	uint32_t data;
1095 	int i, num_xcc;
1096 
1097 	/* RLC autoload sequence 2: copy ucode */
1098 	gfx_v12_1_rlc_backdoor_autoload_copy_sdma_ucode(adev);
1099 	gfx_v12_1_rlc_backdoor_autoload_copy_gfx_ucode(adev);
1100 	gfx_v12_1_rlc_backdoor_autoload_copy_mes_ucode(adev);
1101 	gfx_v12_1_rlc_backdoor_autoload_copy_toc_ucode(adev);
1102 
1103 	rlc_g_offset = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].offset;
1104 	rlc_g_size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].size;
1105 	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset - adev->gmc.vram_start;
1106 
1107 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1108 	for (i = 0; i < num_xcc; i++) {
1109 		WREG32_SOC15(GC, GET_INST(GC, i),
1110 			     regGFX_IMU_RLC_BOOTLOADER_ADDR_HI,
1111 			     upper_32_bits(gpu_addr));
1112 		WREG32_SOC15(GC, GET_INST(GC, i),
1113 			     regGFX_IMU_RLC_BOOTLOADER_ADDR_LO,
1114 			     lower_32_bits(gpu_addr));
1115 		WREG32_SOC15(GC, GET_INST(GC, i),
1116 			     regGFX_IMU_RLC_BOOTLOADER_SIZE,
1117 			     rlc_g_size);
1118 	}
1119 
1120 	if (adev->gfx.imu.funcs) {
1121 		/* RLC autoload sequence 3: load IMU fw */
1122 		if (adev->gfx.imu.funcs->load_microcode)
1123 			adev->gfx.imu.funcs->load_microcode(adev);
1124 	}
1125 
1126 	/* unhalt rlc to start autoload */
1127 	for (i = 0; i < num_xcc; i++) {
1128 		data = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_THREAD_ENABLE);
1129 		data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD0_ENABLE, 1);
1130 		data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1);
1131 		WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_THREAD_ENABLE, data);
1132 		WREG32_SOC15(GC, GET_INST(GC, i), regRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
1133 	}
1134 
1135 	return 0;
1136 }
1137 
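/*
 * sw_init: register the CP interrupts, create the RLC/MEC/KIQ objects
 * and per-XCC compute rings, and allocate the firmware staging buffers.
 */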
1138 static int gfx_v12_1_sw_init(struct amdgpu_ip_block *ip_block)
1139 {
1140 	int i, j, k, r, ring_id = 0;
1141 	unsigned num_compute_rings;
1142 	int xcc_id, num_xcc;
1143 	struct amdgpu_device *adev = ip_block->adev;
1144 
1145 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1146 	case IP_VERSION(12, 1, 0):
1147 		adev->gfx.mec.num_mec = 1;
1148 		adev->gfx.mec.num_pipe_per_mec = 4;
1149 		adev->gfx.mec.num_queue_per_pipe = 8;
1150 		break;
1151 	default:
1152 		adev->gfx.mec.num_mec = 2;
1153 		adev->gfx.mec.num_pipe_per_mec = 2;
1154 		adev->gfx.mec.num_queue_per_pipe = 4;
1155 		break;
1156 	}
1157 
1158 	if (adev->gfx.num_compute_rings) {
1159 		/* recalculate compute rings to use based on hardware configuration */
1160 		num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
1161 				     adev->gfx.mec.num_queue_per_pipe) / 2;
1162 		adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
1163 						  num_compute_rings);
1164 	}
1165 
1166 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1167 
1168 	/* EOP Event */
1169 	r = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_GRBM_CP,
1170 			      GFX_12_1_0__SRCID__CP_EOP_INTERRUPT,
1171 			      &adev->gfx.eop_irq);
1172 	if (r)
1173 		return r;
1174 
1175 	/* Privileged reg */
1176 	r = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_GRBM_CP,
1177 			      GFX_12_1_0__SRCID__CP_PRIV_REG_FAULT,
1178 			      &adev->gfx.priv_reg_irq);
1179 	if (r)
1180 		return r;
1181 
1182 	/* Privileged inst */
1183 	r = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_GRBM_CP,
1184 			      GFX_12_1_0__SRCID__CP_PRIV_INSTR_FAULT,
1185 			      &adev->gfx.priv_inst_irq);
1186 	if (r)
1187 		return r;
1188 
1189 	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1190 
1191 	r = gfx_v12_1_rlc_init(adev);
1192 	if (r) {
1193 		dev_err(adev->dev, "Failed to init rlc BOs!\n");
1194 		return r;
1195 	}
1196 
1197 	r = gfx_v12_1_mec_init(adev);
1198 	if (r) {
1199 		dev_err(adev->dev, "Failed to init MEC BOs!\n");
1200 		return r;
1201 	}
1202 
1203 	/* set up the compute queues - allocate horizontally across pipes */
1204 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
1205 		ring_id = 0;
1206 		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1207 			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1208 				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
1209 					if (!amdgpu_gfx_is_mec_queue_enabled(adev,
1210 								xcc_id, i, k, j))
1211 						continue;
1212 
1213 					r = gfx_v12_1_compute_ring_init(adev, ring_id,
1214 								xcc_id, i, k, j);
1215 					if (r)
1216 						return r;
1217 
1218 					ring_id++;
1219 				}
1220 			}
1221 		}
1222 
1223 		if (!adev->enable_mes_kiq) {
1224 			r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, xcc_id);
1225 			if (r) {
1226 				dev_err(adev->dev, "Failed to init KIQ BOs!\n");
1227 				return r;
1228 			}
1229 
1230 			r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
1231 			if (r)
1232 				return r;
1233 		}
1234 
1235 		r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v12_1_compute_mqd), xcc_id);
1236 		if (r)
1237 			return r;
1238 	}
1239 
1240 	/* allocate visible FB for rlc auto-loading fw */
1241 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1242 		r = gfx_v12_1_rlc_autoload_buffer_init(adev);
1243 		if (r)
1244 			return r;
1245 	} else if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1246 		r = gfx_v12_1_init_cp_compute_microcode_bo(adev);
1247 		if (r)
1248 			return r;
1249 	}
1250 
1251 	r = gfx_v12_1_gpu_early_init(adev);
1252 	if (r)
1253 		return r;
1254 
1255 	r = amdgpu_gfx_sysfs_init(adev);
1256 	if (r)
1257 		return r;
1258 
1259 	return 0;
1260 }
1261 
1262 static void gfx_v12_1_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
1263 {
1264 	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
1265 			&adev->gfx.rlc.rlc_autoload_gpu_addr,
1266 			(void **)&adev->gfx.rlc.rlc_autoload_ptr);
1267 }
1268 
1269 static int gfx_v12_1_sw_fini(struct amdgpu_ip_block *ip_block)
1270 {
1271 	int i, num_xcc;
1272 	struct amdgpu_device *adev = ip_block->adev;
1273 
1274 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1275 	for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
1276 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1277 
1278 	for (i = 0; i < num_xcc; i++) {
1279 		amdgpu_gfx_mqd_sw_fini(adev, i);
1280 
1281 		if (!adev->enable_mes_kiq) {
1282 			amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
1283 			amdgpu_gfx_kiq_fini(adev, i);
1284 		}
1285 	}
1286 
1287 	gfx_v12_1_rlc_fini(adev);
1288 	gfx_v12_1_mec_fini(adev);
1289 
1290 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
1291 		gfx_v12_1_rlc_autoload_buffer_fini(adev);
1292 
1293 	gfx_v12_1_free_microcode(adev);
1294 	amdgpu_gfx_sysfs_fini(adev);
1295 
1296 	return 0;
1297 }
1298 
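/*
 * Steer subsequent register accesses to a specific SE/SA/instance via
 * GRBM_GFX_INDEX; 0xffffffff selects broadcast for that field.
 */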
1299 static void gfx_v12_1_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
1300 				       u32 sh_num, u32 instance, int xcc_id)
1301 {
1302 	u32 data;
1303 
1304 	if (instance == 0xffffffff)
1305 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
1306 				     INSTANCE_BROADCAST_WRITES, 1);
1307 	else
1308 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
1309 				     instance);
1310 
1311 	if (se_num == 0xffffffff)
1312 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
1313 				     1);
1314 	else
1315 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
1316 
1317 	if (sh_num == 0xffffffff)
1318 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
1319 				     1);
1320 	else
1321 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);
1322 
1323 	WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
1324 }
1325 
1326 static u32 gfx_v12_1_get_sa_active_bitmap(struct amdgpu_device *adev,
1327 					  int xcc_id)
1328 {
1329 	u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;
1330 
1331 	gc_disabled_sa_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SA_UNIT_DISABLE);
1332 	gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
1333 					    CC_GC_SA_UNIT_DISABLE,
1334 					    SA_DISABLE);
1335 	gc_user_disabled_sa_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SA_UNIT_DISABLE);
1336 	gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
1337 						 GC_USER_SA_UNIT_DISABLE,
1338 						 SA_DISABLE);
1339 	sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
1340 					    adev->gfx.config.max_shader_engines);
1341 
1342 	return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
1343 }
1344 
1345 static u32 gfx_v12_1_get_rb_active_bitmap(struct amdgpu_device *adev,
1346 					  int xcc_id)
1347 {
1348 	u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
1349 	u32 rb_mask;
1350 
1351 	gc_disabled_rb_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
1352 					   regCC_RB_BACKEND_DISABLE);
1353 	gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
1354 					    CC_RB_BACKEND_DISABLE,
1355 					    BACKEND_DISABLE);
1356 	gc_user_disabled_rb_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
1357 						regGC_USER_RB_BACKEND_DISABLE);
1358 	gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
1359 						 GC_USER_RB_BACKEND_DISABLE,
1360 						 BACKEND_DISABLE);
1361 	rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
1362 					    adev->gfx.config.max_shader_engines);
1363 
1364 	return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
1365 }
1366 
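/*
 * Derive the enabled-RB mask from the SA and RB disable fuses of every
 * XCC; each active SA is assumed to contribute two render backends
 * (hence the 0x3 pairs below).
 */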
1367 static void gfx_v12_1_setup_rb(struct amdgpu_device *adev)
1368 {
1369 	u32 rb_bitmap_width_per_sa;
1370 	u32 max_sa;
1371 	u32 active_sa_bitmap;
1372 	u32 global_active_rb_bitmap;
1373 	u32 active_rb_bitmap = 0;
1374 	u32 i;
1375 	int xcc_id;
1376 
1377 	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
1378 		/* query sa bitmap from SA_UNIT_DISABLE registers */
1379 		active_sa_bitmap = gfx_v12_1_get_sa_active_bitmap(adev, xcc_id);
1380 		/* query rb bitmap from RB_BACKEND_DISABLE registers */
1381 		global_active_rb_bitmap = gfx_v12_1_get_rb_active_bitmap(adev, xcc_id);
1382 
1383 		/* generate active rb bitmap according to active sa bitmap */
1384 		max_sa = adev->gfx.config.max_shader_engines *
1385 			 adev->gfx.config.max_sh_per_se;
1386 		rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
1387 					 adev->gfx.config.max_sh_per_se;
1388 		for (i = 0; i < max_sa; i++) {
1389 			if (active_sa_bitmap & (1 << i))
1390 				active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
1391 		}
1392 
1393 		active_rb_bitmap |= global_active_rb_bitmap;
1394 	}
1395 
1396 	adev->gfx.config.backend_enable_mask = active_rb_bitmap;
1397 	adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
1398 }
1399 
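/*
 * Initialize the KFD (compute) VMIDs: program the aperture bases,
 * enable the trap handler and disable VGPR deallocation per VMID.
 */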
1400 static void gfx_v12_1_xcc_init_compute_vmid(struct amdgpu_device *adev,
1401 					    int xcc_id)
1402 {
1403 	int i;
1404 	uint32_t sh_mem_bases;
1405 	uint32_t data;
1406 
1407 	/*
1408 	 * Configure apertures:
1409 	 * LDS:         0x20000000'00000000 - 0x20000001'00000000 (4GB)
1410 	 * Scratch:     0x10000000'00000000 - 0x11ffffff'ffffffff (128PB 57-bit)
1411 	 */
1412 	sh_mem_bases = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1413 				     (adev->gmc.private_aperture_start >> 58));
1414 	sh_mem_bases = REG_SET_FIELD(sh_mem_bases, SH_MEM_BASES, SHARED_BASE,
1415 				     (adev->gmc.shared_aperture_start >> 48));
1416 
1417 	mutex_lock(&adev->srbm_mutex);
1418 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1419 		soc_v1_0_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
1420 		/* CP and shaders */
1421 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
1422 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);
1423 
1424 		/* Enable trap for each kfd vmid. */
1425 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
1426 		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
1427 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);
1428 
1429 		/* Disable VGPR deallocation instruction for each KFD vmid. */
1430 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_DEBUG);
1431 		data = REG_SET_FIELD(data, SQ_DEBUG, DISABLE_VGPR_DEALLOC, 1);
1432 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_DEBUG, data);
1433 	}
1434 	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1435 	mutex_unlock(&adev->srbm_mutex);
1436 }
1437 
1438 static void gfx_v12_1_tcp_harvest(struct amdgpu_device *adev)
1439 {
1440 	/* TODO: harvest feature to be added later. */
1441 }
1442 
1443 static void gfx_v12_1_get_tcc_info(struct amdgpu_device *adev)
1444 {
1445 }
1446 
1447 static void gfx_v12_1_xcc_constants_init(struct amdgpu_device *adev,
1448 					 int xcc_id)
1449 {
1450 	u32 tmp;
1451 	int i;
1452 
1453 	/* XXX SH_MEM regs */
1454 	/* where to put LDS, scratch, GPUVM in FSA64 space */
1455 	mutex_lock(&adev->srbm_mutex);
1456 	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
1457 		soc_v1_0_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
1458 		/* CP and shaders */
1459 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1460 			     regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
1461 		if (i != 0) {
1462 			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1463 				(adev->gmc.private_aperture_start >> 58));
1464 			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
1465 				(adev->gmc.shared_aperture_start >> 48));
1466 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, tmp);
1467 		}
1468 	}
1469 	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1470 
1471 	mutex_unlock(&adev->srbm_mutex);
1472 
1473 	gfx_v12_1_xcc_init_compute_vmid(adev, xcc_id);
1474 }
1475 
1476 static void gfx_v12_1_constants_init(struct amdgpu_device *adev)
1477 {
1478 	int i, num_xcc;
1479 
1480 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1481 
1482 	gfx_v12_1_setup_rb(adev);
1483 	gfx_v12_1_get_cu_info(adev, &adev->gfx.cu_info);
1484 	gfx_v12_1_get_tcc_info(adev);
1485 	adev->gfx.config.pa_sc_tile_steering_override = 0;
1486 
1487 	for (i = 0; i < num_xcc; i++)
1488 		gfx_v12_1_xcc_constants_init(adev, i);
1489 }
1490 
1491 static void gfx_v12_1_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1492 						    bool enable, int xcc_id)
1493 {
1494 	u32 tmp;
1495 
1496 	if (amdgpu_sriov_vf(adev))
1497 		return;
1498 
1499 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);
1500 
1501 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
1502 			    enable ? 1 : 0);
1503 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
1504 			    enable ? 1 : 0);
1505 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
1506 			    enable ? 1 : 0);
1507 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
1508 			    enable ? 1 : 0);
1509 
1510 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
1511 }
1512 
1513 static int gfx_v12_1_xcc_init_csb(struct amdgpu_device *adev,
1514 				  int xcc_id)
1515 {
1516 	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
1517 
1518 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CSIB_ADDR_HI,
1519 			adev->gfx.rlc.clear_state_gpu_addr >> 32);
1520 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CSIB_ADDR_LO,
1521 			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
1522 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1523 		     regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
1524 
1525 	return 0;
1526 }
1527 
1528 static void gfx_v12_1_xcc_rlc_stop(struct amdgpu_device *adev,
1529 				   int xcc_id)
1530 {
1531 	u32 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CNTL);
1532 
1533 	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
1534 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CNTL, tmp);
1535 }
1536 
1537 static void gfx_v12_1_rlc_stop(struct amdgpu_device *adev)
1538 {
1539 	int i, num_xcc;
1540 
1541 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1542 	for (i = 0; i < num_xcc; i++)
1543 		gfx_v12_1_xcc_rlc_stop(adev, i);
1544 }
1545 
1546 static void gfx_v12_1_xcc_rlc_reset(struct amdgpu_device *adev,
1547 				    int xcc_id)
1548 {
1549 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id),
1550 			      GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
1551 	udelay(50);
1552 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id),
1553 			      GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
1554 	udelay(50);
1555 }
1556 
1557 static void gfx_v12_1_rlc_reset(struct amdgpu_device *adev)
1558 {
1559 	int i, num_xcc;
1560 
1561 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1562 	for (i = 0; i < num_xcc; i++)
1563 		gfx_v12_1_xcc_rlc_reset(adev, i);
1564 }
1565 
1566 static void gfx_v12_1_xcc_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
1567 						 bool enable, int xcc_id)
1568 {
1569 	uint32_t rlc_pg_cntl;
1570 
1571 	rlc_pg_cntl = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_PG_CNTL);
1572 
1573 	if (!enable) {
		/* RLC_PG_CNTL[23] = 0 (default)
		 *   RLC will wait for handshake acks with SMU,
		 *   GFXOFF will be enabled.
		 * RLC_PG_CNTL[23] = 1
		 *   RLC will not issue any message to SMU,
		 *   hence no handshake between SMU & RLC,
		 *   GFXOFF will be disabled.
		 */
1582 		rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
	} else {
		rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
	}
1585 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_PG_CNTL, rlc_pg_cntl);
1586 }
1587 
1588 static void gfx_v12_1_xcc_rlc_start(struct amdgpu_device *adev,
1589 				    int xcc_id)
1590 {
	/* TODO: re-enable the RLC/SMU handshake once the SMU and the
	 * gfxoff feature work as expected
	 */
1593 	if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
1594 		gfx_v12_1_xcc_rlc_smu_handshake_cntl(adev, false, xcc_id);
1595 
1596 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL, RLC_ENABLE_F32, 1);
1597 	udelay(50);
1598 }
1599 
1600 static void gfx_v12_1_rlc_start(struct amdgpu_device *adev)
1601 {
1602 	int i, num_xcc;
1603 
1604 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v12_1_xcc_rlc_start(adev, i);
1608 }
1609 
1610 static void gfx_v12_1_xcc_rlc_enable_srm(struct amdgpu_device *adev,
1611 					 int xcc_id)
1612 {
1613 	uint32_t tmp;
1614 
1615 	/* enable Save Restore Machine */
1616 	tmp = RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SRM_CNTL));
1617 	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1618 	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
1619 	WREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SRM_CNTL), tmp);
1620 }
1621 
1622 static void gfx_v12_1_xcc_load_rlcg_microcode(struct amdgpu_device *adev,
1623 					      int xcc_id)
1624 {
1625 	const struct rlc_firmware_header_v2_0 *hdr;
1626 	const __le32 *fw_data;
1627 	unsigned i, fw_size;
1628 
1629 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1630 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1631 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1632 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1633 
1634 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
1635 		     RLCG_UCODE_LOADING_START_ADDRESS);
1636 
1637 	for (i = 0; i < fw_size; i++)
1638 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1639 			     regRLC_GPM_UCODE_DATA,
1640 			     le32_to_cpup(fw_data++));
1641 
1642 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1643 		     regRLC_GPM_UCODE_ADDR,
1644 		     adev->gfx.rlc_fw_version);
1645 }
1646 
1647 static void gfx_v12_1_xcc_load_rlc_iram_dram_microcode(struct amdgpu_device *adev,
1648 						       int xcc_id)
1649 {
1650 	const struct rlc_firmware_header_v2_2 *hdr;
1651 	const __le32 *fw_data;
1652 	unsigned i, fw_size;
1653 	u32 tmp;
1654 
1655 	hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
1656 
1657 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1658 			le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes));
1659 	fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4;
1660 
1661 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_LX6_IRAM_ADDR, 0);
1662 
1663 	for (i = 0; i < fw_size; i++) {
1664 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
1665 			msleep(1);
1666 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1667 			     regRLC_LX6_IRAM_DATA,
1668 			     le32_to_cpup(fw_data++));
1669 	}
1670 
1671 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1672 		     regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
1673 
1674 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1675 			le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes));
1676 	fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4;
1677 
1678 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1679 		     regRLC_LX6_DRAM_ADDR, 0);
1680 	for (i = 0; i < fw_size; i++) {
1681 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
1682 			msleep(1);
1683 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1684 			     regRLC_LX6_DRAM_DATA,
1685 			     le32_to_cpup(fw_data++));
1686 	}
1687 
1688 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1689 		     regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
1690 
1691 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_LX6_CNTL);
1692 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1);
1693 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0);
1694 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_LX6_CNTL, tmp);
1695 }
1696 
1697 static int gfx_v12_1_xcc_rlc_load_microcode(struct amdgpu_device *adev,
1698 					    int xcc_id)
1699 {
1700 	const struct rlc_firmware_header_v2_0 *hdr;
1701 	uint16_t version_major;
1702 	uint16_t version_minor;
1703 
1704 	if (!adev->gfx.rlc_fw)
1705 		return -EINVAL;
1706 
1707 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1708 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
1709 
1710 	version_major = le16_to_cpu(hdr->header.header_version_major);
1711 	version_minor = le16_to_cpu(hdr->header.header_version_minor);
1712 
1713 	if (version_major == 2) {
1714 		gfx_v12_1_xcc_load_rlcg_microcode(adev, xcc_id);
1715 		if (amdgpu_dpm == 1) {
1716 			if (version_minor >= 2)
1717 				gfx_v12_1_xcc_load_rlc_iram_dram_microcode(adev, xcc_id);
1718 		}
1719 
1720 		return 0;
1721 	}
1722 
1723 	return -EINVAL;
1724 }
1725 
1726 static int gfx_v12_1_xcc_rlc_resume(struct amdgpu_device *adev,
1727 				    int xcc_id)
1728 {
1729 	int r;
1730 
1731 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1732 		gfx_v12_1_xcc_init_csb(adev, xcc_id);
1733 
1734 		if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
1735 			gfx_v12_1_xcc_rlc_enable_srm(adev, xcc_id);
1736 	} else {
1737 		if (amdgpu_sriov_vf(adev)) {
1738 			gfx_v12_1_xcc_init_csb(adev, xcc_id);
1739 			return 0;
1740 		}
1741 
1742 		gfx_v12_1_xcc_rlc_stop(adev, xcc_id);
1743 
1744 		/* disable CG */
1745 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
1746 
1747 		/* disable PG */
1748 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_PG_CNTL, 0);
1749 
1750 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1751 			/* legacy rlc firmware loading */
1752 			r = gfx_v12_1_xcc_rlc_load_microcode(adev, xcc_id);
1753 			if (r)
1754 				return r;
1755 		}
1756 
1757 		gfx_v12_1_xcc_init_csb(adev, xcc_id);
1758 
1759 		gfx_v12_1_xcc_rlc_start(adev, xcc_id);
1760 	}
1761 
1762 	return 0;
1763 }
1764 
1765 static int gfx_v12_1_rlc_resume(struct amdgpu_device *adev)
1766 {
1767 	int r, i, num_xcc;
1768 
1769 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1770 	for (i = 0; i < num_xcc; i++) {
1771 		r = gfx_v12_1_xcc_rlc_resume(adev, i);
1772 		if (r)
1773 			return r;
1774 	}
1775 
1776 	return 0;
1777 }
1778 
1779 static void gfx_v12_1_xcc_config_gfx_rs64(struct amdgpu_device *adev,
1780 					  int xcc_id)
1781 {
1782 	const struct gfx_firmware_header_v2_0 *mec_hdr;
1783 	uint32_t pipe_id, tmp;
1784 
1785 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)
1786 		adev->gfx.mec_fw->data;
1787 
1788 	/* config mec program start addr */
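	/* the CNTR_START registers take a dword address: the LO value packs
	 * ucode_start_addr_lo >> 2 with the low two bits of addr_hi in bits
	 * 31:30, and the HI register takes the remaining addr_hi bits >> 2
	 */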
1789 	for (pipe_id = 0; pipe_id < 4; pipe_id++) {
1790 		soc_v1_0_grbm_select(adev, 1, pipe_id, 0, 0, GET_INST(GC, xcc_id));
1791 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START,
1792 					mec_hdr->ucode_start_addr_lo >> 2 |
1793 					mec_hdr->ucode_start_addr_hi << 30);
1794 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START_HI,
1795 					mec_hdr->ucode_start_addr_hi >> 2);
1796 	}
1797 	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1798 
1799 	/* reset mec pipe */
1800 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL);
1801 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
1802 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
1803 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
1804 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
1805 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL, tmp);
1806 
1807 	/* clear mec pipe reset */
1808 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
1809 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
1810 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
1811 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
1812 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL, tmp);
1813 }
1814 
1815 static void gfx_v12_1_config_gfx_rs64(struct amdgpu_device *adev)
1816 {
1817 	int i, num_xcc;
1818 
1819 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1820 
1821 	for (i = 0; i < num_xcc; i++)
1822 		gfx_v12_1_xcc_config_gfx_rs64(adev, i);
1823 }
1824 
1825 static void gfx_v12_1_xcc_set_mec_ucode_start_addr(struct amdgpu_device *adev,
1826 						   int xcc_id)
1827 {
1828 	const struct gfx_firmware_header_v2_0 *cp_hdr;
1829 	unsigned pipe_id;
1830 
1831 	cp_hdr = (const struct gfx_firmware_header_v2_0 *)
1832 		adev->gfx.mec_fw->data;
1833 	mutex_lock(&adev->srbm_mutex);
1834 	for (pipe_id = 0; pipe_id < adev->gfx.mec.num_pipe_per_mec; pipe_id++) {
1835 		soc_v1_0_grbm_select(adev, 1, pipe_id, 0, 0, GET_INST(GC, xcc_id));
1836 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START,
1837 			     cp_hdr->ucode_start_addr_lo >> 2 |
1838 			     cp_hdr->ucode_start_addr_hi << 30);
1839 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START_HI,
1840 			     cp_hdr->ucode_start_addr_hi >> 2);
1841 	}
1842 	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1843 	mutex_unlock(&adev->srbm_mutex);
1844 }
1845 
1846 static int gfx_v12_1_xcc_wait_for_rlc_autoload_complete(struct amdgpu_device *adev,
1847 							int xcc_id)
1848 {
1849 	uint32_t cp_status;
1850 	uint32_t bootload_status;
1851 	int i;
1852 
1853 	for (i = 0; i < adev->usec_timeout; i++) {
1854 		cp_status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_STAT);
1855 		bootload_status = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
1856 					       regRLC_RLCS_BOOTLOAD_STATUS);
1857 
1858 		if ((cp_status == 0) &&
1859 		    (REG_GET_FIELD(bootload_status,
1860 			RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
1861 			break;
1862 		}
1863 		udelay(1);
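		/* emulation runs much slower than silicon, so back off
		 * between polls
		 */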
1864 		if (amdgpu_emu_mode)
1865 			msleep(10);
1866 	}
1867 
1868 	if (i >= adev->usec_timeout) {
1869 		dev_err(adev->dev,
1870 			"rlc autoload: xcc%d gc ucode autoload timeout\n", xcc_id);
1871 		return -ETIMEDOUT;
1872 	}
1873 
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		gfx_v12_1_xcc_set_mec_ucode_start_addr(adev, xcc_id);
1877 
1878 	return 0;
1879 }
1880 
1881 static int gfx_v12_1_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
1882 {
	int xcc_id, r;

	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
		r = gfx_v12_1_xcc_wait_for_rlc_autoload_complete(adev, xcc_id);
		if (r)
			return r;
	}
1887 
1888 	return 0;
1889 }
1890 
1891 static void gfx_v12_1_xcc_cp_compute_enable(struct amdgpu_device *adev,
1892 					    bool enable, int xcc_id)
1893 {
1894 	u32 data;
1895 
1896 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL);
1897 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE,
1898 						 enable ? 0 : 1);
1899 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET,
1900 						 enable ? 0 : 1);
1901 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET,
1902 						 enable ? 0 : 1);
1903 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET,
1904 						 enable ? 0 : 1);
1905 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET,
1906 						 enable ? 0 : 1);
1907 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE,
1908 						 enable ? 1 : 0);
1909 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE,
						 enable ? 1 : 0);
1911 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE,
1912 						 enable ? 1 : 0);
1913 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE,
1914 						 enable ? 1 : 0);
1915 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT,
1916 						 enable ? 0 : 1);
1917 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL, data);
1918 
1919 	adev->gfx.kiq[xcc_id].ring.sched.ready = enable;
1920 
1921 	udelay(50);
1922 }
1923 
1924 static int gfx_v12_1_init_cp_compute_microcode_bo(struct amdgpu_device *adev)
1925 {
1926 	const struct gfx_firmware_header_v2_0 *mec_hdr;
1927 	const __le32 *fw_ucode, *fw_data;
1928 	u32 fw_ucode_size, fw_data_size;
1929 	u32 *fw_ucode_ptr, *fw_data_ptr;
1930 	int i, r, xcc_id;
1931 
1932 	if (!adev->gfx.mec_fw)
1933 		return -EINVAL;
1934 
1935 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
1936 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
1937 
1938 	fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data +
1939 				le32_to_cpu(mec_hdr->ucode_offset_bytes));
1940 	fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes);
1941 
1942 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1943 				le32_to_cpu(mec_hdr->data_offset_bytes));
1944 	fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
1945 
	if (!adev->gfx.mec.mec_fw_obj) {
1947 		r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
1948 					      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
1949 					      &adev->gfx.mec.mec_fw_obj,
1950 					      &adev->gfx.mec.mec_fw_gpu_addr,
1951 					      (void **)&fw_ucode_ptr);
1952 		if (r) {
1953 			dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
1954 			gfx_v12_1_mec_fini(adev);
1955 			return r;
1956 		}
1957 
1958 		memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
1959 
1960 		amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
1961 		amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
1962 	}
1963 
1964 	if (adev->gfx.mec.mec_fw_data_obj == NULL) {
1965 		r = amdgpu_bo_create_reserved(adev,
1966 					      ALIGN(fw_data_size, 64 * 1024) *
1967 					      adev->gfx.mec.num_pipe_per_mec * NUM_XCC(adev->gfx.xcc_mask),
1968 					      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
1969 					      &adev->gfx.mec.mec_fw_data_obj,
1970 					      &adev->gfx.mec.mec_fw_data_gpu_addr,
1971 					      (void **)&fw_data_ptr);
1972 		if (r) {
1973 			dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r);
1974 			gfx_v12_1_mec_fini(adev);
1975 			return r;
1976 		}
1977 
1978 		for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
1979 			for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
1980 				u32 offset = (xcc_id * adev->gfx.mec.num_pipe_per_mec + i) *
1981 					     ALIGN(fw_data_size, 64 * 1024) / 4;
1982 				memcpy(fw_data_ptr + offset, fw_data, fw_data_size);
1983 			}
1984 		}
1985 
1986 		amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
1987 		amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);
1988 	}
1989 
1990 	return 0;
1991 }
1992 
1993 static int gfx_v12_1_xcc_cp_compute_load_microcode_rs64(struct amdgpu_device *adev,
1994 							int xcc_id)
1995 {
1996 	const struct gfx_firmware_header_v2_0 *mec_hdr;
1997 	u32 fw_data_size;
1998 	u32 tmp, i, usec_timeout = 50000; /* Wait for 50 ms */
1999 
2000 	if (!adev->gfx.mec_fw)
2001 		return -EINVAL;
2002 
2003 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
2004 	fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
2005 
2006 	gfx_v12_1_xcc_cp_compute_enable(adev, false, xcc_id);
2007 
2008 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL);
2009 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2010 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2011 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2012 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp);
2013 
2014 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_BASE_CNTL);
2015 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
2016 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
2017 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_BASE_CNTL, tmp);
2018 
2019 	mutex_lock(&adev->srbm_mutex);
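	/* program each pipe with its private copy of the data segment and
	 * the shared instruction-cache base
	 */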
2020 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
2021 		soc_v1_0_grbm_select(adev, 1, i, 0, 0, GET_INST(GC, xcc_id));
2022 
2023 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_MDBASE_LO,
2024 			     lower_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr +
2025 					   (xcc_id * adev->gfx.mec.num_pipe_per_mec + i) *
2026 					   ALIGN(fw_data_size, 64 * 1024)));
2027 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_MDBASE_HI,
2028 			     upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr +
2029 					   (xcc_id * adev->gfx.mec.num_pipe_per_mec + i) *
2030 					   ALIGN(fw_data_size, 64 * 1024)));
2031 
2032 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO,
2033 				lower_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2034 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI,
2035 				upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2036 	}
	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
	mutex_unlock(&adev->srbm_mutex);
2039 
	/* Trigger an invalidation of the MEC data cache */
2041 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_OP_CNTL);
2042 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2043 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_OP_CNTL, tmp);
2044 
2045 	/* Wait for invalidation complete */
2046 	for (i = 0; i < usec_timeout; i++) {
2047 		tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
				  INVALIDATE_DCACHE_COMPLETE) == 1)
2050 			break;
2051 		udelay(1);
2052 	}
2053 
2054 	if (i >= usec_timeout) {
2055 		dev_err(adev->dev, "failed to invalidate data cache\n");
2056 		return -EINVAL;
2057 	}
2058 
2059 	/* Trigger an invalidation of the L1 instruction caches */
2060 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_OP_CNTL);
2061 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2062 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_OP_CNTL, tmp);
2063 
2064 	/* Wait for invalidation complete */
2065 	for (i = 0; i < usec_timeout; i++) {
2066 		tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE) == 1)
2069 			break;
2070 		udelay(1);
2071 	}
2072 
2073 	if (i >= usec_timeout) {
2074 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2075 		return -EINVAL;
2076 	}
2077 
2078 	gfx_v12_1_xcc_set_mec_ucode_start_addr(adev, xcc_id);
2079 
2080 	return 0;
2081 }
2082 
2083 static void gfx_v12_1_xcc_kiq_setting(struct amdgpu_ring *ring,
2084 				      int xcc_id)
2085 {
2086 	uint32_t tmp;
2087 	struct amdgpu_device *adev = ring->adev;
2088 
2089 	/* tell RLC which is KIQ queue */
2090 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
2091 	tmp &= 0xffffff00;
2092 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2093 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
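	/* the first write latches the me/pipe/queue selection, the second
	 * write sets bit 7, which activates the selection
	 */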
2094 	tmp |= 0x80;
2095 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
2096 }
2097 
2098 static void gfx_v12_1_xcc_cp_set_doorbell_range(struct amdgpu_device *adev,
2099 						int xcc_id)
2100 {
2101 	/* disable gfx engine doorbell range */
2102 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_DOORBELL_RANGE_LOWER, 0);
2103 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_DOORBELL_RANGE_UPPER, 0);
2104 
2105 	/* set compute engine doorbell range */
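	/* doorbell indices count 64-bit doorbells while the registers take
	 * a byte offset, hence the *2 (to dwords) and the <<2 (to bytes)
	 */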
2106 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_LOWER,
2107 		     ((adev->doorbell_index.kiq +
2108 		       xcc_id * adev->doorbell_index.xcc_doorbell_range) *
2109 		      2) << 2);
2110 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_UPPER,
2111 		     ((adev->doorbell_index.userqueue_end +
2112 		       xcc_id * adev->doorbell_index.xcc_doorbell_range) *
2113 		      2) << 2);
2114 }
2115 
2116 static int gfx_v12_1_compute_mqd_init(struct amdgpu_device *adev, void *m,
2117 				      struct amdgpu_mqd_prop *prop)
2118 {
2119 	struct v12_1_compute_mqd *mqd = m;
2120 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
2121 	uint32_t tmp;
2122 
2123 	mqd->header = 0xC0310800;
2124 	mqd->compute_pipelinestat_enable = 0x00000001;
2125 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2126 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2127 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2128 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2129 	mqd->compute_misc_reserved = 0x00000007;
2130 
2131 	eop_base_addr = prop->eop_gpu_addr >> 8;
2132 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2133 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
2134 
2135 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2136 	tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
2137 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
2138 			(order_base_2(GFX12_MEC_HPD_SIZE / 4) - 1));
2139 
2140 	mqd->cp_hqd_eop_control = tmp;
2141 
2142 	/* enable doorbell? */
2143 	tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
2144 
2145 	if (prop->use_doorbell) {
2146 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2147 				    DOORBELL_OFFSET, prop->doorbell_index);
2148 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2149 				    DOORBELL_EN, 1);
2150 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2151 				    DOORBELL_SOURCE, 0);
2152 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2153 				    DOORBELL_HIT, 0);
2154 	} else {
2155 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2156 				    DOORBELL_EN, 0);
2157 	}
2158 
2159 	mqd->cp_hqd_pq_doorbell_control = tmp;
2160 
2161 	/* disable the queue if it's active */
2162 	mqd->cp_hqd_dequeue_request = 0;
2163 	mqd->cp_hqd_pq_rptr = 0;
2164 	mqd->cp_hqd_pq_wptr_lo = 0;
2165 	mqd->cp_hqd_pq_wptr_hi = 0;
2166 
2167 	/* set the pointer to the MQD */
2168 	mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc;
2169 	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
2170 
2171 	/* set MQD vmid to 0 */
2172 	tmp = regCP_MQD_CONTROL_DEFAULT;
2173 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
2174 	mqd->cp_mqd_control = tmp;
2175 
	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2177 	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
2178 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2179 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2180 
2181 	/* set up the HQD, this is similar to CP_RB0_CNTL */
2182 	tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
2183 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
2184 			    (order_base_2(prop->queue_size / 4) - 1));
2185 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
2186 			    (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
2187 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
2188 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
2189 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
2190 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
2191 	mqd->cp_hqd_pq_control = tmp;
2192 
2193 	/* set the wb address whether it's enabled or not */
2194 	wb_gpu_addr = prop->rptr_gpu_addr;
2195 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2196 	mqd->cp_hqd_pq_rptr_report_addr_hi =
2197 		upper_32_bits(wb_gpu_addr) & 0xffff;
2198 
2199 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2200 	wb_gpu_addr = prop->wptr_gpu_addr;
2201 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2202 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2203 
2204 	tmp = 0;
2205 	/* enable the doorbell if requested */
2206 	if (prop->use_doorbell) {
2207 		tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
2208 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2209 				DOORBELL_OFFSET, prop->doorbell_index);
2210 
2211 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2212 				    DOORBELL_EN, 1);
2213 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2214 				    DOORBELL_SOURCE, 0);
2215 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2216 				    DOORBELL_HIT, 0);
2217 	}
2218 
2219 	mqd->cp_hqd_pq_doorbell_control = tmp;
2220 
2221 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2222 	mqd->cp_hqd_pq_rptr = regCP_HQD_PQ_RPTR_DEFAULT;
2223 
2224 	/* set the vmid for the queue */
2225 	mqd->cp_hqd_vmid = 0;
2226 
2227 	tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
2228 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x63);
2229 	mqd->cp_hqd_persistent_state = tmp;
2230 
2231 	/* set MIN_IB_AVAIL_SIZE */
2232 	tmp = regCP_HQD_IB_CONTROL_DEFAULT;
2233 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 1);
2234 	mqd->cp_hqd_ib_control = tmp;
2235 
2236 	/* set static priority for a compute queue/ring */
2237 	mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority;
2238 	mqd->cp_hqd_queue_priority = prop->hqd_queue_priority;
2239 
2240 	mqd->cp_mqd_stride_size = prop->mqd_stride_size ? prop->mqd_stride_size :
2241 		AMDGPU_MQD_SIZE_ALIGN(adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size);
2242 
2243 	mqd->cp_hqd_active = prop->hqd_active;
2244 
2245 	return 0;
2246 }
2247 
2248 static int gfx_v12_1_xcc_kiq_init_register(struct amdgpu_ring *ring,
2249 					   int xcc_id)
2250 {
2251 	struct amdgpu_device *adev = ring->adev;
2252 	struct v12_1_compute_mqd *mqd = ring->mqd_ptr;
2253 	int j;
2254 
2255 	/* inactivate the queue */
2256 	if (amdgpu_sriov_vf(adev))
2257 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
2258 
2259 	/* disable wptr polling */
2260 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
2261 
2262 	/* write the EOP addr */
2263 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
2264 	       mqd->cp_hqd_eop_base_addr_lo);
2265 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
2266 	       mqd->cp_hqd_eop_base_addr_hi);
2267 
2268 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2269 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
2270 	       mqd->cp_hqd_eop_control);
2271 
2272 	/* enable doorbell? */
2273 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
2274 	       mqd->cp_hqd_pq_doorbell_control);
2275 
2276 	/* disable the queue if it's active */
2277 	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
2278 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
2279 		for (j = 0; j < adev->usec_timeout; j++) {
2280 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
2281 				break;
2282 			udelay(1);
2283 		}
2284 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
2285 		       mqd->cp_hqd_dequeue_request);
2286 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
2287 		       mqd->cp_hqd_pq_rptr);
2288 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
2289 		       mqd->cp_hqd_pq_wptr_lo);
2290 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
2291 		       mqd->cp_hqd_pq_wptr_hi);
2292 	}
2293 
2294 	/* set the pointer to the MQD */
2295 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
2296 	       mqd->cp_mqd_base_addr_lo);
2297 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
2298 	       mqd->cp_mqd_base_addr_hi);
2299 
2300 	/* set MQD vmid to 0 */
2301 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
2302 	       mqd->cp_mqd_control);
2303 
	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2305 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
2306 	       mqd->cp_hqd_pq_base_lo);
2307 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
2308 	       mqd->cp_hqd_pq_base_hi);
2309 
2310 	/* set up the HQD, this is similar to CP_RB0_CNTL */
2311 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
2312 	       mqd->cp_hqd_pq_control);
2313 
2314 	/* set the wb address whether it's enabled or not */
2315 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR,
2316 		mqd->cp_hqd_pq_rptr_report_addr_lo);
2317 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
2318 		mqd->cp_hqd_pq_rptr_report_addr_hi);
2319 
2320 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2321 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR,
2322 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
2323 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
2324 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
2325 
2326 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
2327 	       mqd->cp_hqd_pq_doorbell_control);
2328 
2329 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2330 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
2331 	       mqd->cp_hqd_pq_wptr_lo);
2332 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
2333 	       mqd->cp_hqd_pq_wptr_hi);
2334 
2335 	/* set the vmid for the queue */
2336 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid);
2337 
2338 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE,
2339 	       mqd->cp_hqd_persistent_state);
2340 
2341 	/* activate the queue */
2342 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE,
2343 	       mqd->cp_hqd_active);
2344 
2345 	if (ring->use_doorbell)
2346 		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2347 
2348 	return 0;
2349 }
2350 
2351 static int gfx_v12_1_xcc_kiq_init_queue(struct amdgpu_ring *ring,
2352 					int xcc_id)
2353 {
2354 	struct amdgpu_device *adev = ring->adev;
2355 	struct v12_1_compute_mqd *mqd = ring->mqd_ptr;
2356 
2357 	gfx_v12_1_xcc_kiq_setting(ring, xcc_id);
2358 
2359 	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
2360 		/* reset MQD to a clean status */
2361 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2362 			memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(*mqd));
2363 
2364 		/* reset ring buffer */
2365 		ring->wptr = 0;
2366 		amdgpu_ring_clear_ring(ring);
2367 
2368 		mutex_lock(&adev->srbm_mutex);
2369 		soc_v1_0_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2370 		gfx_v12_1_xcc_kiq_init_register(ring, xcc_id);
2371 		soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2372 		mutex_unlock(&adev->srbm_mutex);
2373 	} else {
2374 		memset((void *)mqd, 0, sizeof(*mqd));
2375 		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
2376 			amdgpu_ring_clear_ring(ring);
2377 		mutex_lock(&adev->srbm_mutex);
2378 		soc_v1_0_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2379 		amdgpu_ring_init_mqd(ring);
2380 		gfx_v12_1_xcc_kiq_init_register(ring, xcc_id);
2381 		soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2382 		mutex_unlock(&adev->srbm_mutex);
2383 
2384 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2385 			memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(*mqd));
2386 	}
2387 
2388 	return 0;
2389 }
2390 
2391 static int gfx_v12_1_xcc_kcq_init_queue(struct amdgpu_ring *ring,
2392 					int xcc_id)
2393 {
2394 	struct amdgpu_device *adev = ring->adev;
2395 	struct v12_1_compute_mqd *mqd = ring->mqd_ptr;
2396 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
2397 
2398 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2399 		memset((void *)mqd, 0, sizeof(*mqd));
2400 		mutex_lock(&adev->srbm_mutex);
2401 		soc_v1_0_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2402 		amdgpu_ring_init_mqd(ring);
2403 		soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2404 		mutex_unlock(&adev->srbm_mutex);
2405 
2406 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2407 			memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
2408 	} else {
2409 		/* restore MQD to a clean status */
2410 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2411 			memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
2412 		/* reset ring buffer */
2413 		ring->wptr = 0;
2414 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
2415 		amdgpu_ring_clear_ring(ring);
2416 	}
2417 
2418 	return 0;
2419 }
2420 
2421 static int gfx_v12_1_xcc_kiq_resume(struct amdgpu_device *adev,
2422 				    int xcc_id)
2423 {
2424 	struct amdgpu_ring *ring;
2425 	int r;
2426 
2427 	ring = &adev->gfx.kiq[xcc_id].ring;
2428 
2429 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
2430 	if (unlikely(r != 0))
2431 		return r;
2432 
2433 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2434 	if (unlikely(r != 0)) {
2435 		amdgpu_bo_unreserve(ring->mqd_obj);
2436 		return r;
2437 	}
2438 
2439 	gfx_v12_1_xcc_kiq_init_queue(ring, xcc_id);
2440 	amdgpu_bo_kunmap(ring->mqd_obj);
2441 	ring->mqd_ptr = NULL;
2442 	amdgpu_bo_unreserve(ring->mqd_obj);
2443 	ring->sched.ready = true;
2444 	return 0;
2445 }
2446 
2447 static int gfx_v12_1_xcc_kcq_resume(struct amdgpu_device *adev,
2448 				    int xcc_id)
2449 {
2450 	struct amdgpu_ring *ring = NULL;
2451 	int r = 0, i;
2452 
2453 	if (!amdgpu_async_gfx_ring)
2454 		gfx_v12_1_xcc_cp_compute_enable(adev, true, xcc_id);
2455 
2456 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
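		/* compute rings are stored per XCC in one flat array */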
2457 		ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
2458 
2459 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
2460 		if (unlikely(r != 0))
2461 			goto done;
2462 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2463 		if (!r) {
2464 			r = gfx_v12_1_xcc_kcq_init_queue(ring, xcc_id);
2465 			amdgpu_bo_kunmap(ring->mqd_obj);
2466 			ring->mqd_ptr = NULL;
2467 		}
2468 		amdgpu_bo_unreserve(ring->mqd_obj);
2469 		if (r)
2470 			goto done;
2471 	}
2472 
2473 	r = amdgpu_gfx_enable_kcq(adev, xcc_id);
2474 done:
2475 	return r;
2476 }
2477 
2478 static int gfx_v12_1_xcc_cp_resume(struct amdgpu_device *adev, uint16_t xcc_mask)
2479 {
2480 	int r, i, xcc_id;
2481 	struct amdgpu_ring *ring;
2482 
2483 	for_each_inst(xcc_id, xcc_mask) {
2484 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
2485 			/* legacy firmware loading */
2486 			r = gfx_v12_1_xcc_cp_compute_load_microcode_rs64(adev, xcc_id);
2487 			if (r)
2488 				return r;
2489 		}
2490 
		/* GFX CGCG and LS are set by default */
2492 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2493 			gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
2494 
2495 		gfx_v12_1_xcc_cp_set_doorbell_range(adev, xcc_id);
2496 
2497 		gfx_v12_1_xcc_cp_compute_enable(adev, true, xcc_id);
2498 
2499 		if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
2500 			r = amdgpu_mes_kiq_hw_init(adev, xcc_id);
2501 		else
2502 			r = gfx_v12_1_xcc_kiq_resume(adev, xcc_id);
2503 		if (r)
2504 			return r;
2505 
2506 		r = gfx_v12_1_xcc_kcq_resume(adev, xcc_id);
2507 		if (r)
2508 			return r;
2509 
2510 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2511 			ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
2512 			r = amdgpu_ring_test_helper(ring);
2513 			if (r)
2514 				return r;
2515 		}
2516 	}
2517 
2518 	return 0;
2519 }
2520 
2521 static int gfx_v12_1_cp_resume(struct amdgpu_device *adev)
2522 {
2523 	int num_xcc, num_xcp, num_xcc_per_xcp;
2524 	uint16_t xcc_mask;
2525 	int r = 0;
2526 
2527 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2528 	if (amdgpu_sriov_vf(adev)) {
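		/* under SR-IOV the host owns the partition mode: query it and
		 * derive the XCP count instead of switching modes ourselves
		 */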
2529 		enum amdgpu_gfx_partition mode;
2530 
2531 		mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2532 						       AMDGPU_XCP_FL_NONE);
2533 		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2534 			return -EINVAL;
2535 		if (adev->gfx.funcs &&
2536 		    adev->gfx.funcs->get_xccs_per_xcp) {
2537 			num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
2538 			adev->gfx.num_xcc_per_xcp = num_xcc_per_xcp;
2539 			num_xcp = num_xcc / num_xcc_per_xcp;
2540 		} else {
2541 			return -EINVAL;
2542 		}
2543 		r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);
2544 
2545 	} else {
2546 		if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2547 						    AMDGPU_XCP_FL_NONE) ==
2548 		    AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2549 			r = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr,
2550 							     amdgpu_user_partt_mode);
2551 	}
2552 
2553 	if (r)
2554 		return r;
2555 
2556 	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
2557 
2558 	return gfx_v12_1_xcc_cp_resume(adev, xcc_mask);
2559 }
2560 
2561 static int gfx_v12_1_gfxhub_enable(struct amdgpu_device *adev)
2562 {
2563 	int r, i;
2564 	bool value;
2565 
2566 	r = adev->gfxhub.funcs->gart_enable(adev);
2567 	if (r)
2568 		return r;
2569 
	value = (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS);
2572 
2573 	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
	/*
	 * TODO: investigate why a TLB flush is needed here,
	 * are we missing a flush somewhere else?
	 */
2576 	for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
2577 		if (AMDGPU_IS_GFXHUB(i))
2578 			adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(i), 0);
2579 	}
2580 
2581 	return 0;
2582 }
2583 
2584 static int get_gb_addr_config(struct amdgpu_device *adev)
2585 {
2586 	u32 gb_addr_config;
2587 
2588 	gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG_READ);
2589 	if (gb_addr_config == 0)
2590 		return -EINVAL;
2591 
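	/* most GB_ADDR_CONFIG fields are log2-encoded; decode them to counts */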
2592 	adev->gfx.config.gb_addr_config_fields.num_pkrs =
2593 		1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG_READ, NUM_PKRS);
2594 
2595 	adev->gfx.config.gb_addr_config = gb_addr_config;
2596 
2597 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
2598 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2599 				      GB_ADDR_CONFIG_READ, NUM_PIPES);
2600 
2601 	adev->gfx.config.max_tile_pipes =
2602 		adev->gfx.config.gb_addr_config_fields.num_pipes;
2603 
2604 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
2605 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2606 				      GB_ADDR_CONFIG_READ, MAX_COMPRESSED_FRAGS);
2607 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
2608 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2609 				      GB_ADDR_CONFIG_READ, NUM_RB_PER_SE);
2610 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
2611 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2612 				      GB_ADDR_CONFIG_READ, NUM_SHADER_ENGINES);
2613 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
2614 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2615 				      GB_ADDR_CONFIG_READ, PIPE_INTERLEAVE_SIZE));
2616 
2617 	return 0;
2618 }
2619 
2620 static void gfx_v12_1_xcc_disable_gpa_mode(struct amdgpu_device *adev,
2621 					   int xcc_id)
2622 {
2623 	uint32_t data;
2624 
2625 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
2626 	data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
2627 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
2628 
2629 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPG_PSP_DEBUG);
2630 	data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK;
2631 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPG_PSP_DEBUG, data);
2632 }
2633 
2634 static void gfx_v12_1_xcc_setup_tcp_thrashing_ctrl(struct amdgpu_device *adev,
2635 					 int xcc_id)
2636 {
2637 	uint32_t val;
2638 
	/* Program the TCP UTCL0 thrashing control thresholds */
2640 	val = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2641 					regTCP_UTCL0_THRASHING_CTRL);
2642 	val = REG_SET_FIELD(val, TCP_UTCL0_THRASHING_CTRL, THRASHING_EN, 0x2);
2643 	val = REG_SET_FIELD(val, TCP_UTCL0_THRASHING_CTRL,
2644 					RETRY_FRAGMENT_THRESHOLD_UP_EN, 0x1);
2645 	val = REG_SET_FIELD(val, TCP_UTCL0_THRASHING_CTRL,
2646 					RETRY_FRAGMENT_THRESHOLD_DOWN_EN, 0x1);
2647 
2648 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2649 					regTCP_UTCL0_THRASHING_CTRL, val);
2650 }
2651 
2652 static void gfx_v12_1_xcc_enable_atomics(struct amdgpu_device *adev,
2653 					 int xcc_id)
2654 {
2655 	uint32_t data;
2656 
2657 	/* Set the TCP UTCL0 register to enable atomics */
2658 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_UTCL0_CNTL1);
2659 	data = REG_SET_FIELD(data, TCP_UTCL0_CNTL1, ATOMIC_REQUESTER_EN, 0x1);
2660 
2661 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_UTCL0_CNTL1, data);
2662 }
2663 
2664 static void gfx_v12_1_xcc_disable_burst(struct amdgpu_device *adev,
2665 					int xcc_id)
2666 {
2667 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGL1_DRAM_BURST_CTRL, 0xf);
2668 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGLARB_DRAM_BURST_CTRL, 0xf);
2669 }
2670 
2671 static void gfx_v12_1_xcc_disable_early_write_ack(struct amdgpu_device *adev,
2672 					int xcc_id)
2673 {
2674 	uint32_t data;
2675 
2676 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_CNTL3);
2677 	data = REG_SET_FIELD(data, TCP_CNTL3, DISABLE_EARLY_WRITE_ACK, 0x1);
2678 
2679 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_CNTL3, data);
2680 }
2681 
2682 static void gfx_v12_1_xcc_disable_tcp_spill_cache(struct amdgpu_device *adev,
2683 					int xcc_id)
2684 {
2685 	uint32_t data;
2686 
2687 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_CNTL);
2688 	data = REG_SET_FIELD(data, TCP_CNTL, TCP_SPILL_CACHE_DISABLE, 0x1);
2689 
2690 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_CNTL, data);
2691 }
2692 
2693 static void gfx_v12_1_init_golden_registers(struct amdgpu_device *adev)
2694 {
2695 	int i;
2696 
2697 	for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); i++) {
2698 		gfx_v12_1_xcc_disable_burst(adev, i);
2699 		gfx_v12_1_xcc_enable_atomics(adev, i);
2700 		gfx_v12_1_xcc_setup_tcp_thrashing_ctrl(adev, i);
2701 		gfx_v12_1_xcc_disable_early_write_ack(adev, i);
2702 		gfx_v12_1_xcc_disable_tcp_spill_cache(adev, i);
2703 	}
2704 }
2705 
2706 static int gfx_v12_1_hw_init(struct amdgpu_ip_block *ip_block)
2707 {
2708 	int r, i, num_xcc;
2709 	struct amdgpu_device *adev = ip_block->adev;
2710 
2711 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
2712 		/* rlc autoload firmware */
2713 		r = gfx_v12_1_rlc_backdoor_autoload_enable(adev);
2714 		if (r)
2715 			return r;
2716 	} else {
2717 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
2718 			num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2719 
2720 			if (adev->gfx.imu.funcs) {
2721 				if (adev->gfx.imu.funcs->load_microcode)
2722 					adev->gfx.imu.funcs->load_microcode(adev);
2723 			}
2724 
2725 			for (i = 0; i < num_xcc; i++) {
2726 				/* disable gpa mode in backdoor loading */
2727 				gfx_v12_1_xcc_disable_gpa_mode(adev, i);
2728 			}
2729 		}
2730 	}
2731 
2732 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
2733 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2734 		r = gfx_v12_1_wait_for_rlc_autoload_complete(adev);
2735 		if (r) {
2736 			dev_err(adev->dev, "(%d) failed to wait rlc autoload complete\n", r);
2737 			return r;
2738 		}
2739 	}
2740 
2741 	adev->gfx.is_poweron = true;
2742 
2743 	if (get_gb_addr_config(adev))
		DRM_WARN("Invalid gb_addr_config!\n");
2745 
2746 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2747 		gfx_v12_1_config_gfx_rs64(adev);
2748 
2749 	r = gfx_v12_1_gfxhub_enable(adev);
2750 	if (r)
2751 		return r;
2752 
2753 	gfx_v12_1_init_golden_registers(adev);
2754 
2755 	gfx_v12_1_constants_init(adev);
2756 
2757 	if (adev->nbio.funcs->gc_doorbell_init)
2758 		adev->nbio.funcs->gc_doorbell_init(adev);
2759 
2760 	r = gfx_v12_1_rlc_resume(adev);
2761 	if (r)
2762 		return r;
2763 
2764 	/*
2765 	 * init golden registers and rlc resume may override some registers,
2766 	 * reconfig them here
2767 	 */
2768 	gfx_v12_1_tcp_harvest(adev);
2769 
	return gfx_v12_1_cp_resume(adev);
2775 }
2776 
2777 static void gfx_v12_1_xcc_fini(struct amdgpu_device *adev,
2778 			      int xcc_id)
2779 {
2780 	uint32_t tmp;
2781 
2782 	if (!adev->no_hw_access) {
2783 		if (amdgpu_gfx_disable_kcq(adev, xcc_id))
2784 			DRM_ERROR("KCQ disable failed\n");
2785 
2786 		amdgpu_mes_kiq_hw_fini(adev, xcc_id);
2787 	}
2788 
2789 	if (amdgpu_sriov_vf(adev)) {
2790 		/* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
2791 		tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
2792 		tmp &= 0xffffff00;
2793 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
2794 	}
2795 	gfx_v12_1_xcc_cp_compute_enable(adev, false, xcc_id);
2796 	gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
2797 }
2798 
2799 static int gfx_v12_1_set_userq_eop_interrupts(struct amdgpu_device *adev,
2800 					      bool enable)
2801 {
2802 	unsigned int irq_type;
2803 	int m, p, r;
2804 
2805 	if (adev->gfx.disable_kq) {
2806 		for (m = 0; m < adev->gfx.mec.num_mec; ++m) {
2807 			for (p = 0; p < adev->gfx.mec.num_pipe_per_mec; p++) {
2808 				irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
2809 					+ (m * adev->gfx.mec.num_pipe_per_mec)
2810 					+ p;
2811 				if (enable)
2812 					r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
2813 							   irq_type);
2814 				else
2815 					r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
2816 							   irq_type);
2817 				if (r)
2818 					return r;
2819 			}
2820 		}
2821 	}
2822 
2823 	return 0;
2824 }
2825 
2826 static int gfx_v12_1_hw_fini(struct amdgpu_ip_block *ip_block)
2827 {
2828 	struct amdgpu_device *adev = ip_block->adev;
2829 	int i, num_xcc;
2830 
2831 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
2832 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
2833 	gfx_v12_1_set_userq_eop_interrupts(adev, false);
2834 
2835 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v12_1_xcc_fini(adev, i);
2839 
2840 	adev->gfxhub.funcs->gart_disable(adev);
2841 
2842 	adev->gfx.is_poweron = false;
2843 
2844 	return 0;
2845 }
2846 
2847 static int gfx_v12_1_suspend(struct amdgpu_ip_block *ip_block)
2848 {
2849 	return gfx_v12_1_hw_fini(ip_block);
2850 }
2851 
2852 static int gfx_v12_1_resume(struct amdgpu_ip_block *ip_block)
2853 {
2854 	return gfx_v12_1_hw_init(ip_block);
2855 }
2856 
2857 static bool gfx_v12_1_is_idle(struct amdgpu_ip_block *ip_block)
2858 {
2859 	struct amdgpu_device *adev = ip_block->adev;
2860 	int i, num_xcc;
2861 
2862 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2863 	for (i = 0; i < num_xcc; i++) {
2864 		if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i),
2865 				regGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE))
2866 			return false;
2867 	}
2868 	return true;
2869 }
2870 
2871 static int gfx_v12_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
2872 {
2873 	unsigned i;
2874 	struct amdgpu_device *adev = ip_block->adev;
2875 
2876 	for (i = 0; i < adev->usec_timeout; i++) {
2877 		if (gfx_v12_1_is_idle(ip_block))
2878 			return 0;
2879 		udelay(1);
2880 	}
2881 	return -ETIMEDOUT;
2882 }
2883 
2884 static uint64_t gfx_v12_1_get_gpu_clock_counter(struct amdgpu_device *adev)
2885 {
2886 	uint64_t clock = 0;
2887 
2888 	if (adev->smuio.funcs &&
2889 	    adev->smuio.funcs->get_gpu_clock_counter)
2890 		clock = adev->smuio.funcs->get_gpu_clock_counter(adev);
2891 	else
		dev_warn(adev->dev, "querying the gpu clock counter is not supported\n");
2893 
2894 	return clock;
2895 }
2896 
2897 static int gfx_v12_1_early_init(struct amdgpu_ip_block *ip_block)
2898 {
	struct amdgpu_device *adev = ip_block->adev;

2902 	switch (amdgpu_user_queue) {
2903 	case -1:
2904 	default:
2905 		adev->gfx.disable_kq = true;
2906 		adev->gfx.disable_uq = true;
2907 		break;
2908 	case 0:
2909 		adev->gfx.disable_kq = false;
2910 		adev->gfx.disable_uq = true;
2911 		break;
2912 	}
2913 
2914 	adev->gfx.funcs = &gfx_v12_1_gfx_funcs;
2915 
2916 	if (adev->gfx.disable_kq)
2917 		adev->gfx.num_compute_rings = 0;
2918 	else
2919 		adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
2920 						  AMDGPU_MAX_COMPUTE_RINGS);
2921 
2922 	gfx_v12_1_set_kiq_pm4_funcs(adev);
2923 	gfx_v12_1_set_ring_funcs(adev);
2924 	gfx_v12_1_set_irq_funcs(adev);
2925 	gfx_v12_1_set_rlc_funcs(adev);
2926 	gfx_v12_1_set_mqd_funcs(adev);
2927 	gfx_v12_1_set_imu_funcs(adev);
2928 
2929 	gfx_v12_1_init_rlcg_reg_access_ctrl(adev);
2930 
2931 	return gfx_v12_1_init_microcode(adev);
2932 }
2933 
2934 static int gfx_v12_1_late_init(struct amdgpu_ip_block *ip_block)
2935 {
2936 	struct amdgpu_device *adev = ip_block->adev;
2937 	int r;
2938 
2939 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
2940 	if (r)
2941 		return r;
2942 
2943 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
2944 	if (r)
2945 		return r;
2946 
2947 	r = gfx_v12_1_set_userq_eop_interrupts(adev, true);
2948 	if (r)
2949 		return r;
2950 
2951 	return 0;
2952 }
2953 
2954 static bool gfx_v12_1_is_rlc_enabled(struct amdgpu_device *adev)
2955 {
2956 	uint32_t rlc_cntl;
2957 
	/* report whether the RLC F32 core is running */
2959 	rlc_cntl = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
2960 	return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false;
2961 }
2962 
2963 static void gfx_v12_1_xcc_set_safe_mode(struct amdgpu_device *adev,
2964 					int xcc_id)
2965 {
2966 	uint32_t data;
2967 	unsigned i;
2968 
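	/* request safe-mode entry: CMD with a message payload of 1 */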
2969 	data = RLC_SAFE_MODE__CMD_MASK;
2970 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
2971 
2972 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
2973 
2974 	/* wait for RLC_SAFE_MODE */
2975 	for (i = 0; i < adev->usec_timeout; i++) {
2976 		if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2977 						regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
2978 			break;
2979 		udelay(1);
2980 	}
2981 }
2982 
2983 static void gfx_v12_1_xcc_unset_safe_mode(struct amdgpu_device *adev,
2984 					  int xcc_id)
2985 {
2986 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2987 		     regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK);
2988 }
2989 
2990 static void gfx_v12_1_update_perf_clk(struct amdgpu_device *adev,
2991 				      bool enable)
2992 {
2993 	int i, num_xcc;
2994 
2995 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2996 	for (i = 0; i < num_xcc; i++)
2997 		gfx_v12_1_xcc_update_perf_clk(adev, enable, i);
2998 }
2999 
3000 static void gfx_v12_1_update_spm_vmid(struct amdgpu_device *adev,
3001 				      int xcc_id,
3002 				      struct amdgpu_ring *ring,
3003 				      unsigned vmid)
3004 {
3005 	u32 reg, data;
3006 
3007 	reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL);
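	/* with a single active VF the register can be accessed directly,
	 * skipping the KIQ round-trip
	 */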
3008 	if (amdgpu_sriov_is_pp_one_vf(adev))
3009 		data = RREG32_NO_KIQ(reg);
3010 	else
3011 		data = RREG32(reg);
3012 
3013 	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
3014 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
3015 
3016 	if (amdgpu_sriov_is_pp_one_vf(adev))
3017 		WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL, data);
3018 	else
3019 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL, data);
3020 
	if (ring &&
	    amdgpu_sriov_is_pp_one_vf(adev) &&
	    ((ring->funcs->type == AMDGPU_RING_TYPE_GFX) ||
	     (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)))
		amdgpu_ring_emit_wreg(ring, reg, data);
3028 }
3029 
3030 static const struct amdgpu_rlc_funcs gfx_v12_1_rlc_funcs = {
3031 	.is_rlc_enabled = gfx_v12_1_is_rlc_enabled,
3032 	.set_safe_mode = gfx_v12_1_xcc_set_safe_mode,
3033 	.unset_safe_mode = gfx_v12_1_xcc_unset_safe_mode,
3034 	.init = gfx_v12_1_rlc_init,
3035 	.get_csb_size = gfx_v12_1_get_csb_size,
3036 	.get_csb_buffer = gfx_v12_1_get_csb_buffer,
3037 	.resume = gfx_v12_1_rlc_resume,
3038 	.stop = gfx_v12_1_rlc_stop,
3039 	.reset = gfx_v12_1_rlc_reset,
3040 	.start = gfx_v12_1_rlc_start,
3041 	.update_spm_vmid = gfx_v12_1_update_spm_vmid,
3042 };
3043 
3044 #if 0
3045 static void gfx_v12_cntl_power_gating(struct amdgpu_device *adev, bool enable)
3046 {
3047 	/* TODO */
3048 }
3049 
3050 static void gfx_v12_cntl_pg(struct amdgpu_device *adev, bool enable)
3051 {
3052 	/* TODO */
3053 }
3054 #endif
3055 
3056 static int gfx_v12_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
3057 					   enum amd_powergating_state state)
3058 {
3059 	struct amdgpu_device *adev = ip_block->adev;
3060 	bool enable = (state == AMD_PG_STATE_GATE);
3061 
3062 	if (amdgpu_sriov_vf(adev))
3063 		return 0;
3064 
3065 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
3066 	case IP_VERSION(12, 1, 0):
3067 		amdgpu_gfx_off_ctrl(adev, enable);
3068 		break;
3069 	default:
3070 		break;
3071 	}
3072 
3073 	return 0;
3074 }
3075 
3076 static void gfx_v12_1_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
3077 							   bool enable, int xcc_id)
3078 {
3079 	uint32_t def, data;
3080 
3081 	if (!(adev->cg_flags &
3082 	      (AMD_CG_SUPPORT_GFX_CGCG |
3083 	      AMD_CG_SUPPORT_GFX_CGLS |
3084 	      AMD_CG_SUPPORT_GFX_3D_CGCG |
3085 	      AMD_CG_SUPPORT_GFX_3D_CGLS)))
3086 		return;
3087 
3088 	if (enable) {
3089 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
3090 					  regRLC_CGTT_MGCG_OVERRIDE);
3091 
3092 		/* unset CGCG override */
3093 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
3094 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
3095 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3096 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3097 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG ||
3098 		    adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
3099 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
3100 
3101 		/* update CGCG override bits */
3102 		if (def != data)
3103 			WREG32_SOC15(GC, GET_INST(GC, xcc_id),
3104 				     regRLC_CGTT_MGCG_OVERRIDE, data);
3105 
3106 		/* enable cgcg FSM(0x0000363F) */
3107 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
3108 
3109 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
3110 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK;
3111 			data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3112 				 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
3113 		}
3114 
3115 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
3116 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK;
3117 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3118 				 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3119 		}
3120 
3121 		if (def != data)
3122 			WREG32_SOC15(GC, GET_INST(GC, xcc_id),
3123 				     regRLC_CGCG_CGLS_CTRL, data);
3124 
3125 		/* set IDLE_POLL_COUNT(0x00900100) */
3126 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
3127 
3128 		data &= ~CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK;
3129 		data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
3130 		data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3131 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3132 
3133 		if (def != data)
3134 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
3135 
3136 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL);
3137 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
3138 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
3139 		data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
3140 		data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
3141 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL, data);
3142 	} else {
3143 		/* Program RLC_CGCG_CGLS_CTRL */
3144 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
3145 
3146 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
3147 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
3148 
3149 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3150 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3151 
3152 		if (def != data)
3153 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
3154 	}
3155 }
3156 
3157 static void gfx_v12_1_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev,
3158 							   bool enable, int xcc_id)
3159 {
	uint32_t data, def;

3161 	if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)))
3162 		return;
3163 
3164 	/* It is disabled by HW by default */
3165 	if (enable) {
3166 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
3167 			/* 1 - RLC_CGTT_MGCG_OVERRIDE */
3168 			def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3169 
3170 			data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3171 				  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
3172 				  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
3173 
3174 			if (def != data)
3175 				WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3176 		}
3177 	} else {
3178 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
3179 			def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3180 
3181 			data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
3182 				 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3183 				 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
3184 
3185 			if (def != data)
3186 				WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3187 		}
3188 	}
3189 }
3190 
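/*
 * Repeater fine grain clock gating follows the same override scheme:
 * clearing the GFXIP/RLC repeater FGCG override bits in
 * RLC_CGTT_MGCG_OVERRIDE lets the repeater clocks gate.
 */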
3191 static void gfx_v12_1_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
3192 					       bool enable, int xcc_id)
3193 {
3194 	uint32_t def, data;
3195 
3196 	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
3197 		return;
3198 
3199 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3200 
3201 	if (enable)
3202 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK |
3203 			  RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK);
3204 	else
3205 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK |
3206 			RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK;
3207 
3208 	if (def != data)
3209 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3210 }
3211 
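/* SRAM fine grain clock gating is controlled by a single override bit. */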
3212 static void gfx_v12_1_xcc_update_sram_fgcg(struct amdgpu_device *adev,
3213 					   bool enable, int xcc_id)
3214 {
3215 	uint32_t def, data;
3216 
3217 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
3218 		return;
3219 
3220 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3221 
3222 	if (enable)
3223 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
3224 	else
3225 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
3226 
3227 	if (def != data)
3228 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3229 }
3230 
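/*
 * Perfmon clock gating: PERFMON_CLOCK_STATE in RLC_CGTT_MGCG_OVERRIDE
 * keeps the performance counter clocks ungated while set.
 */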
3231 static void gfx_v12_1_xcc_update_perf_clk(struct amdgpu_device *adev,
3232 					  bool enable, int xcc_id)
3233 {
3234 	uint32_t def, data;
3235 
3236 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK))
3237 		return;
3238 
3239 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3240 
3241 	if (enable)
3242 		data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
3243 	else
3244 		data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
3245 
3246 	if (def != data)
3247 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3248 }
3249 
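/*
 * Apply all GFX clock gating features for one XCC. RLC safe mode brackets
 * the register updates, and the GUI idle interrupt is toggled only when
 * at least one MGCG/CGCG/CGLS feature is supported.
 */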
3250 static int gfx_v12_1_xcc_update_gfx_clock_gating(struct amdgpu_device *adev,
3251 						 bool enable, int xcc_id)
3252 {
3253 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
3254 
3255 	gfx_v12_1_xcc_update_coarse_grain_clock_gating(adev, enable, xcc_id);
3256 
3257 	gfx_v12_1_xcc_update_medium_grain_clock_gating(adev, enable, xcc_id);
3258 
3259 	gfx_v12_1_xcc_update_repeater_fgcg(adev, enable, xcc_id);
3260 
3261 	gfx_v12_1_xcc_update_sram_fgcg(adev, enable, xcc_id);
3262 
3263 	gfx_v12_1_xcc_update_perf_clk(adev, enable, xcc_id);
3264 
3265 	if (adev->cg_flags &
3266 	    (AMD_CG_SUPPORT_GFX_MGCG |
3267 	     AMD_CG_SUPPORT_GFX_CGLS |
3268 	     AMD_CG_SUPPORT_GFX_CGCG |
3269 	     AMD_CG_SUPPORT_GFX_3D_CGCG |
3270 	     AMD_CG_SUPPORT_GFX_3D_CGLS))
3271 		gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, enable, xcc_id);
3272 
3273 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
3274 
3275 	return 0;
3276 }
3277 
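/*
 * IP block clockgating entry point; a no-op for SR-IOV VFs, where clock
 * gating is handled by the host.
 */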
3278 static int gfx_v12_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
3279 					   enum amd_clockgating_state state)
3280 {
3281 	struct amdgpu_device *adev = ip_block->adev;
3282 	int i, num_xcc;
3283 
3284 	if (amdgpu_sriov_vf(adev))
3285 		return 0;
3286 
3287 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3288 	switch (adev->ip_versions[GC_HWIP][0]) {
3289 	case IP_VERSION(12, 1, 0):
3290 		for (i = 0; i < num_xcc; i++)
3291 			gfx_v12_1_xcc_update_gfx_clock_gating(adev,
3292 				  state == AMD_CG_STATE_GATE, i);
3293 		break;
3294 	default:
3295 		break;
3296 	}
3297 
3298 	return 0;
3299 }
3300 
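/*
 * Report the active CG features by reading XCC0 back: a cleared override
 * bit means the corresponding feature is gating, and the CGCG/CGLS
 * enable bits report the FSM state.
 */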
3301 static void gfx_v12_1_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
3302 {
3303 	struct amdgpu_device *adev = ip_block->adev;
3304 	int data;
3305 	u32 data;
3306 	/* AMD_CG_SUPPORT_GFX_MGCG */
3307 	data = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE);
3308 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
3309 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
3310 
3311 	/* AMD_CG_SUPPORT_REPEATER_FGCG */
3312 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK))
3313 		*flags |= AMD_CG_SUPPORT_REPEATER_FGCG;
3314 
3315 	/* AMD_CG_SUPPORT_GFX_FGCG */
3316 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK))
3317 		*flags |= AMD_CG_SUPPORT_GFX_FGCG;
3318 
3319 	/* AMD_CG_SUPPORT_GFX_PERF_CLK */
3320 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK))
3321 		*flags |= AMD_CG_SUPPORT_GFX_PERF_CLK;
3322 
3323 	/* AMD_CG_SUPPORT_GFX_CGCG */
3324 	data = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL);
3325 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
3326 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
3327 
3328 	/* AMD_CG_SUPPORT_GFX_CGLS */
3329 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
3330 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
3331 }
3332 
3333 static u64 gfx_v12_1_ring_get_rptr_compute(struct amdgpu_ring *ring)
3334 {
3335 	/* gfx12 hardware uses a 32-bit rptr */
3336 	return *(uint32_t *)ring->rptr_cpu_addr;
3337 }
3338 
3339 static u64 gfx_v12_1_ring_get_wptr_compute(struct amdgpu_ring *ring)
3340 {
3341 	u64 wptr;
3342 
3343 	/* XXX check if swapping is necessary on BE */
3344 	if (ring->use_doorbell)
3345 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
3346 	else
3347 		BUG();
3348 	return wptr;
3349 }
3350 
3351 static void gfx_v12_1_ring_set_wptr_compute(struct amdgpu_ring *ring)
3352 {
3353 	struct amdgpu_device *adev = ring->adev;
3354 
3355 	/* XXX check if swapping is necessary on BE */
3356 	if (ring->use_doorbell) {
3357 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
3358 			     ring->wptr);
3359 		WDOORBELL64(ring->doorbell_index, ring->wptr);
3360 	} else {
3361 		BUG(); /* only DOORBELL method supported on gfx12 now */
3362 	}
3363 }
3364 
3365 static void gfx_v12_1_ring_emit_ib_compute(struct amdgpu_ring *ring,
3366 					   struct amdgpu_job *job,
3367 					   struct amdgpu_ib *ib,
3368 					   uint32_t flags)
3369 {
3370 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
3371 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
3372 
3373 	/* Currently, there is a high likelihood of a wave ID mismatch
3374 	 * between ME and GDS, leading to a hw deadlock, because ME generates
3375 	 * different wave IDs than the GDS expects. This situation happens
3376 	 * randomly when at least 5 compute pipes use GDS ordered append.
3377 	 * The wave IDs generated by ME are also wrong after suspend/resume.
3378 	 * Those are probably bugs somewhere else in the kernel driver.
3379 	 *
3380 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
3381 	 * GDS to 0 for this ring (me/pipe).
3382 	 */
3383 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
3384 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3385 		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
3386 	}
3387 
3388 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3389 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3390 	amdgpu_ring_write(ring,
3391 #ifdef __BIG_ENDIAN
3392 				(2 << 0) |
3393 #endif
3394 				lower_32_bits(ib->gpu_addr));
3395 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3396 	amdgpu_ring_write(ring, control);
3397 }
3398 
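/*
 * Emit a compute fence using RELEASE_MEM: the packet waits for prior
 * work, writes back GLV/GL2 (GL2 at scope 2), then writes the 32- or
 * 64-bit sequence value and optionally raises an interrupt.
 */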
3399 static void gfx_v12_1_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
3400 				     u64 seq, unsigned flags)
3401 {
3402 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
3403 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
3404 
3405 	/* RELEASE_MEM - flush caches, send int */
3406 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
3407 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ(1) |
3408 				 PACKET3_RELEASE_MEM_GCR_GLV_WB |
3409 				 PACKET3_RELEASE_MEM_GCR_GL2_WB |
3410 				 PACKET3_RELEASE_MEM_GCR_GL2_SCOPE(2) |
3411 				 PACKET3_RELEASE_MEM_TEMPORAL(3) |
3412 				 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
3413 				 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
3414 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
3415 				 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));
3416 
3417 	/*
3418 	 * The address must be Qword aligned for a 64-bit write, and Dword
3419 	 * aligned when only sending the low 32 bits (data high is discarded).
3420 	 */
3421 	if (write64bit)
3422 		BUG_ON(addr & 0x7);
3423 	else
3424 		BUG_ON(addr & 0x3);
3425 	amdgpu_ring_write(ring, lower_32_bits(addr));
3426 	amdgpu_ring_write(ring, upper_32_bits(addr));
3427 	amdgpu_ring_write(ring, lower_32_bits(seq));
3428 	amdgpu_ring_write(ring, upper_32_bits(seq));
3429 	amdgpu_ring_write(ring, 0);
3430 }
3431 
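/*
 * Pipeline sync: block the ring's fetcher (PFP on gfx, ME on compute)
 * with a memory wait until the last emitted fence sequence is visible.
 */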
3432 static void gfx_v12_1_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
3433 {
3434 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3435 	uint32_t seq = ring->fence_drv.sync_seq;
3436 	uint64_t addr = ring->fence_drv.gpu_addr;
3437 
3438 	gfx_v12_1_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
3439 			       upper_32_bits(addr), seq, 0xffffffff, 4);
3440 }
3441 
3442 static void gfx_v12_1_ring_invalidate_tlbs(struct amdgpu_ring *ring,
3443 				   uint16_t pasid, uint32_t flush_type,
3444 				   bool all_hub, uint8_t dst_sel)
3445 {
3446 	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
3447 	amdgpu_ring_write(ring,
3448 			  PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) |
3449 			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
3450 			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
3451 			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
3452 }
3453 
3454 static void gfx_v12_1_ring_emit_vm_flush(struct amdgpu_ring *ring,
3455 					 unsigned vmid, uint64_t pd_addr)
3456 {
3457 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
3458 
3459 	/* compute doesn't have PFP */
3460 	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
3461 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
3462 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3463 		amdgpu_ring_write(ring, 0x0);
3464 	}
3465 }
3466 
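/*
 * KIQ fences are always 32 bit: WRITE_DATA stores the sequence to the
 * writeback address and, when an interrupt is requested, a second
 * WRITE_DATA pokes CPC_INT_STATUS to raise it.
 */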
3467 static void gfx_v12_1_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
3468 					  u64 seq, unsigned int flags)
3469 {
3470 	struct amdgpu_device *adev = ring->adev;
3471 
3472 	/* we only allocate 32 bits for each seq wb address */
3473 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
3474 
3475 	/* write fence seq to the "addr" */
3476 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3477 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3478 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
3479 	amdgpu_ring_write(ring, lower_32_bits(addr));
3480 	amdgpu_ring_write(ring, upper_32_bits(addr));
3481 	amdgpu_ring_write(ring, lower_32_bits(seq));
3482 
3483 	if (flags & AMDGPU_FENCE_FLAG_INT) {
3484 		/* set register to trigger INT */
3485 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3486 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3487 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
3488 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS));
3489 		amdgpu_ring_write(ring, 0);
3490 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
3491 	}
3492 }
3493 
3494 static void gfx_v12_1_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
3495 				     uint32_t reg_val_offs)
3496 {
3497 	struct amdgpu_device *adev = ring->adev;
3498 
3499 	reg = soc_v1_0_normalize_xcc_reg_offset(reg);
3500 
3501 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
3502 	amdgpu_ring_write(ring, 0 |	/* src: register*/
3503 				(5 << 8) |	/* dst: memory */
3504 				(1 << 20));	/* write confirm */
3505 	amdgpu_ring_write(ring, reg);
3506 	amdgpu_ring_write(ring, 0);
3507 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
3508 				reg_val_offs * 4));
3509 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
3510 				reg_val_offs * 4));
3511 }
3512 
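/*
 * Register write through the ring: KIQ uses the "no inc addr" mode,
 * every other ring requests WR_CONFIRM so the write lands before the
 * following packet executes.
 */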
3513 static void gfx_v12_1_ring_emit_wreg(struct amdgpu_ring *ring,
3514 				     uint32_t reg,
3515 				     uint32_t val)
3516 {
3517 	uint32_t cmd = 0;
3518 
3519 	reg = soc_v1_0_normalize_xcc_reg_offset(reg);
3520 
3521 	switch (ring->funcs->type) {
3522 	case AMDGPU_RING_TYPE_KIQ:
3523 		cmd = (1 << 16); /* no inc addr */
3524 		break;
3525 	default:
3526 		cmd = WR_CONFIRM;
3527 		break;
3528 	}
3529 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3530 	amdgpu_ring_write(ring, cmd);
3531 	amdgpu_ring_write(ring, reg);
3532 	amdgpu_ring_write(ring, 0);
3533 	amdgpu_ring_write(ring, val);
3534 }
3535 
3536 static void gfx_v12_1_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
3537 					uint32_t val, uint32_t mask)
3538 {
3539 	gfx_v12_1_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
3540 }
3541 
3542 static void gfx_v12_1_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
3543 						   uint32_t reg0, uint32_t reg1,
3544 						   uint32_t ref, uint32_t mask)
3545 {
3546 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3547 
3548 	gfx_v12_1_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
3549 			       ref, mask, 0x20);
3550 }
3551 
3552 static void gfx_v12_1_xcc_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
3553 							int me, int pipe,
3554 							enum amdgpu_interrupt_state state,
3555 							int xcc_id)
3556 {
3557 	u32 mec_int_cntl, mec_int_cntl_reg;
3558 
3559 	/*
3560 	 * amdgpu controls only the first MEC. That's why this function only
3561 	 * handles the setting of interrupts for this specific MEC. All other
3562 	 * pipes' interrupts are set by amdkfd.
3563 	 */
3564 
3565 	if (me == 1) {
3566 		switch (pipe) {
3567 		case 0:
3568 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3569 					GC, GET_INST(GC, xcc_id),
3570 					regCP_ME1_PIPE0_INT_CNTL);
3571 			break;
3572 		case 1:
3573 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3574 					GC, GET_INST(GC, xcc_id),
3575 					regCP_ME1_PIPE1_INT_CNTL);
3576 			break;
3577 		case 2:
3578 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3579 					GC, GET_INST(GC, xcc_id),
3580 					regCP_ME1_PIPE2_INT_CNTL);
3581 			break;
3582 		case 3:
3583 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3584 					GC, GET_INST(GC, xcc_id),
3585 					regCP_ME1_PIPE3_INT_CNTL);
3586 			break;
3587 		default:
3588 			DRM_DEBUG("invalid pipe %d\n", pipe);
3589 			return;
3590 		}
3591 	} else {
3592 		DRM_DEBUG("invalid me %d\n", me);
3593 		return;
3594 	}
3595 
3596 	switch (state) {
3597 	case AMDGPU_IRQ_STATE_DISABLE:
3598 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3599 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3600 					     TIME_STAMP_INT_ENABLE, 0);
3601 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3602 					     GENERIC0_INT_ENABLE, 0);
3603 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3604 		break;
3605 	case AMDGPU_IRQ_STATE_ENABLE:
3606 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3607 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3608 					     TIME_STAMP_INT_ENABLE, 1);
3609 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3610 					     GENERIC0_INT_ENABLE, 1);
3611 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3612 		break;
3613 	default:
3614 		break;
3615 	}
3616 }
3617 
3618 static int gfx_v12_1_set_eop_interrupt_state(struct amdgpu_device *adev,
3619 					    struct amdgpu_irq_src *src,
3620 					    unsigned type,
3621 					    enum amdgpu_interrupt_state state)
3622 {
3623 	int i, num_xcc;
3624 
3625 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3626 	for (i = 0; i < num_xcc; i++) {
3627 		switch (type) {
3628 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
3629 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3630 					adev, 1, 0, state, i);
3631 			break;
3632 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
3633 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3634 					adev, 1, 1, state, i);
3635 			break;
3636 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
3637 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3638 					adev, 1, 2, state, i);
3639 			break;
3640 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
3641 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3642 					adev, 1, 3, state, i);
3643 			break;
3644 		default:
3645 			break;
3646 		}
3647 	}
3648 
3649 	return 0;
3650 }
3651 
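/*
 * EOP interrupt handler. With MES enabled, src_data[0] carries a
 * doorbell offset identifying a user queue fence driver; otherwise
 * ring_id encodes pipe (bits 1:0), me (bits 3:2) and queue (bits 6:4)
 * of the kernel compute ring to fence-process.
 */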
3652 static int gfx_v12_1_eop_irq(struct amdgpu_device *adev,
3653 			     struct amdgpu_irq_src *source,
3654 			     struct amdgpu_iv_entry *entry)
3655 {
3656 	u32 doorbell_offset = entry->src_data[0];
3657 	u8 me_id, pipe_id, queue_id;
3658 	struct amdgpu_ring *ring;
3659 	int i, xcc_id;
3660 
3661 	DRM_DEBUG("IH: CP EOP\n");
3662 
3663 	if (adev->enable_mes && doorbell_offset) {
3664 		struct amdgpu_userq_fence_driver *fence_drv = NULL;
3665 		struct xarray *xa = &adev->userq_xa;
3666 		unsigned long flags;
3667 
3668 		xa_lock_irqsave(xa, flags);
3669 		fence_drv = xa_load(xa, doorbell_offset);
3670 		if (fence_drv)
3671 			amdgpu_userq_fence_driver_process(fence_drv);
3672 		xa_unlock_irqrestore(xa, flags);
3673 	} else {
3674 		me_id = (entry->ring_id & 0x0c) >> 2;
3675 		pipe_id = (entry->ring_id & 0x03) >> 0;
3676 		queue_id = (entry->ring_id & 0x70) >> 4;
3677 		xcc_id = gfx_v12_1_ih_to_xcc_inst(adev, entry->node_id);
3678 
3679 		if (xcc_id == -EINVAL)
3680 			return -EINVAL;
3681 
3682 		switch (me_id) {
3683 		case 1:
3684 		case 2:
3685 			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3686 				ring = &adev->gfx.compute_ring
3687 						[i +
3688 						 xcc_id * adev->gfx.num_compute_rings];
3689 				/* Per-queue interrupt is supported for MEC starting from VI.
3690 				 * The interrupt can only be enabled/disabled per pipe instead
3691 				 * of per queue.
3692 				 */
3693 				if ((ring->me == me_id) &&
3694 				    (ring->pipe == pipe_id) &&
3695 				    (ring->queue == queue_id))
3696 					amdgpu_fence_process(ring);
3697 			}
3698 			break;
3699 		default:
3700 			dev_dbg(adev->dev, "Unexpected me %d in eop_irq\n", me_id);
3701 			break;
3702 		}
3703 	}
3704 
3705 	return 0;
3706 }
3707 
3708 static int gfx_v12_1_set_priv_reg_fault_state(struct amdgpu_device *adev,
3709 					      struct amdgpu_irq_src *source,
3710 					      unsigned type,
3711 					      enum amdgpu_interrupt_state state)
3712 {
3713 	int i, num_xcc;
3714 
3715 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3716 	switch (state) {
3717 	case AMDGPU_IRQ_STATE_DISABLE:
3718 	case AMDGPU_IRQ_STATE_ENABLE:
3719 		for (i = 0; i < num_xcc; i++)
3720 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3721 					      PRIV_REG_INT_ENABLE,
3722 					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3723 		break;
3724 	default:
3725 		break;
3726 	}
3727 
3728 	return 0;
3729 }
3730 
3731 static int gfx_v12_1_set_priv_inst_fault_state(struct amdgpu_device *adev,
3732 					       struct amdgpu_irq_src *source,
3733 					       unsigned type,
3734 					       enum amdgpu_interrupt_state state)
3735 {
3736 	int i, num_xcc;
3737 
3738 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3739 	switch (state) {
3740 	case AMDGPU_IRQ_STATE_DISABLE:
3741 	case AMDGPU_IRQ_STATE_ENABLE:
3742 		for (i = 0; i < num_xcc; i++)
3743 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3744 				       PRIV_INSTR_INT_ENABLE,
3745 				       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3746 		break;
3747 	default:
3748 		break;
3749 	}
3750 
3751 	return 0;
3752 }
3753 
3754 static void gfx_v12_1_handle_priv_fault(struct amdgpu_device *adev,
3755 					struct amdgpu_iv_entry *entry)
3756 {
3757 	u8 me_id, pipe_id, queue_id;
3758 	struct amdgpu_ring *ring;
3759 	int i, xcc_id;
3760 
3761 	me_id = (entry->ring_id & 0x0c) >> 2;
3762 	pipe_id = (entry->ring_id & 0x03) >> 0;
3763 	queue_id = (entry->ring_id & 0x70) >> 4;
3764 	xcc_id = gfx_v12_1_ih_to_xcc_inst(adev, entry->node_id);
3765 
3766 	if (xcc_id == -EINVAL)
3767 		return;
3768 
3769 	if (!adev->gfx.disable_kq) {
3770 		switch (me_id) {
3771 		case 1:
3772 		case 2:
3773 			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3774 				ring = &adev->gfx.compute_ring
3775 					[i +
3776 					 xcc_id * adev->gfx.num_compute_rings];
3777 				if (ring->me == me_id && ring->pipe == pipe_id &&
3778 				    ring->queue == queue_id)
3779 					drm_sched_fault(&ring->sched);
3780 			}
3781 			break;
3782 		default:
3783 			dev_dbg(adev->dev, "Unexpected me %d in priv_fault\n", me_id);
3784 			break;
3785 		}
3786 	}
3787 }
3788 
3789 static int gfx_v12_1_priv_reg_irq(struct amdgpu_device *adev,
3790 				  struct amdgpu_irq_src *source,
3791 				  struct amdgpu_iv_entry *entry)
3792 {
3793 	DRM_ERROR("Illegal register access in command stream\n");
3794 	gfx_v12_1_handle_priv_fault(adev, entry);
3795 	return 0;
3796 }
3797 
3798 static int gfx_v12_1_priv_inst_irq(struct amdgpu_device *adev,
3799 				   struct amdgpu_irq_src *source,
3800 				   struct amdgpu_iv_entry *entry)
3801 {
3802 	DRM_ERROR("Illegal instruction in command stream\n");
3803 	gfx_v12_1_handle_priv_fault(adev, entry);
3804 	return 0;
3805 }
3806 
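/*
 * Full memory sync: ACQUIRE_MEM over the entire coherency range with a
 * GCR request that invalidates GLV/GLK/GLI and both invalidates and
 * writes back GL2 at scope 2.
 */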
3807 static void gfx_v12_1_emit_mem_sync(struct amdgpu_ring *ring)
3808 {
3809 	const unsigned int gcr_cntl =
3810 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
3811 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
3812 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
3813 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
3814 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1) |
3815 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_SCOPE(2);
3816 
3817 	/* ACQUIRE_MEM - make one or more surfaces valid for use by subsequent operations */
3818 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
3819 	amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
3820 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
3821 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
3822 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
3823 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
3824 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
3825 	amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
3826 }
3827 
3828 static const struct amd_ip_funcs gfx_v12_1_ip_funcs = {
3829 	.name = "gfx_v12_1",
3830 	.early_init = gfx_v12_1_early_init,
3831 	.late_init = gfx_v12_1_late_init,
3832 	.sw_init = gfx_v12_1_sw_init,
3833 	.sw_fini = gfx_v12_1_sw_fini,
3834 	.hw_init = gfx_v12_1_hw_init,
3835 	.hw_fini = gfx_v12_1_hw_fini,
3836 	.suspend = gfx_v12_1_suspend,
3837 	.resume = gfx_v12_1_resume,
3838 	.is_idle = gfx_v12_1_is_idle,
3839 	.wait_for_idle = gfx_v12_1_wait_for_idle,
3840 	.set_clockgating_state = gfx_v12_1_set_clockgating_state,
3841 	.set_powergating_state = gfx_v12_1_set_powergating_state,
3842 	.get_clockgating_state = gfx_v12_1_get_clockgating_state,
3843 };
3844 
3845 static const struct amdgpu_ring_funcs gfx_v12_1_ring_funcs_compute = {
3846 	.type = AMDGPU_RING_TYPE_COMPUTE,
3847 	.align_mask = 0xff,
3848 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
3849 	.support_64bit_ptrs = true,
3850 	.get_rptr = gfx_v12_1_ring_get_rptr_compute,
3851 	.get_wptr = gfx_v12_1_ring_get_wptr_compute,
3852 	.set_wptr = gfx_v12_1_ring_set_wptr_compute,
3853 	.emit_frame_size =
3854 		7 + /* gfx_v12_1_ring_emit_pipeline_sync */
3855 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
3856 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
3857 		2 + /* gfx_v12_1_ring_emit_vm_flush */
3858 		8 + 8 + 8 + /* gfx_v12_1_ring_emit_fence x3 for user fence, vm fence */
3859 		8, /* gfx_v12_1_emit_mem_sync */
3860 	.emit_ib_size =	7, /* gfx_v12_1_ring_emit_ib_compute */
3861 	.emit_ib = gfx_v12_1_ring_emit_ib_compute,
3862 	.emit_fence = gfx_v12_1_ring_emit_fence,
3863 	.emit_pipeline_sync = gfx_v12_1_ring_emit_pipeline_sync,
3864 	.emit_vm_flush = gfx_v12_1_ring_emit_vm_flush,
3865 	.test_ring = gfx_v12_1_ring_test_ring,
3866 	.test_ib = gfx_v12_1_ring_test_ib,
3867 	.insert_nop = amdgpu_ring_insert_nop,
3868 	.pad_ib = amdgpu_ring_generic_pad_ib,
3869 	.emit_wreg = gfx_v12_1_ring_emit_wreg,
3870 	.emit_reg_wait = gfx_v12_1_ring_emit_reg_wait,
3871 	.emit_reg_write_reg_wait = gfx_v12_1_ring_emit_reg_write_reg_wait,
3872 	.emit_mem_sync = gfx_v12_1_emit_mem_sync,
3873 };
3874 
3875 static const struct amdgpu_ring_funcs gfx_v12_1_ring_funcs_kiq = {
3876 	.type = AMDGPU_RING_TYPE_KIQ,
3877 	.align_mask = 0xff,
3878 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
3879 	.support_64bit_ptrs = true,
3880 	.get_rptr = gfx_v12_1_ring_get_rptr_compute,
3881 	.get_wptr = gfx_v12_1_ring_get_wptr_compute,
3882 	.set_wptr = gfx_v12_1_ring_set_wptr_compute,
3883 	.emit_frame_size =
3884 		7 + /* gfx_v12_1_ring_emit_pipeline_sync */
3885 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
3886 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
3887 		2 + /* gfx_v12_1_ring_emit_vm_flush */
3888 		8 + 8 + 8, /* gfx_v12_1_ring_emit_fence_kiq x3 for user fence, vm fence */
3889 	.emit_ib_size =	7, /* gfx_v12_1_ring_emit_ib_compute */
3890 	.emit_ib = gfx_v12_1_ring_emit_ib_compute,
3891 	.emit_fence = gfx_v12_1_ring_emit_fence_kiq,
3892 	.test_ring = gfx_v12_1_ring_test_ring,
3893 	.test_ib = gfx_v12_1_ring_test_ib,
3894 	.insert_nop = amdgpu_ring_insert_nop,
3895 	.pad_ib = amdgpu_ring_generic_pad_ib,
3896 	.emit_rreg = gfx_v12_1_ring_emit_rreg,
3897 	.emit_wreg = gfx_v12_1_ring_emit_wreg,
3898 	.emit_reg_wait = gfx_v12_1_ring_emit_reg_wait,
3899 	.emit_reg_write_reg_wait = gfx_v12_1_ring_emit_reg_write_reg_wait,
3900 };
3901 
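/*
 * Attach the ring function tables: one KIQ ring per XCC plus
 * num_compute_rings compute rings per XCC, stored contiguously in
 * adev->gfx.compute_ring.
 */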
3902 static void gfx_v12_1_set_ring_funcs(struct amdgpu_device *adev)
3903 {
3904 	int i, j, num_xcc;
3905 
3906 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3907 	for (i = 0; i < num_xcc; i++) {
3908 		adev->gfx.kiq[i].ring.funcs = &gfx_v12_1_ring_funcs_kiq;
3909 
3910 		for (j = 0; j < adev->gfx.num_compute_rings; j++)
3911 			adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs =
3912 						&gfx_v12_1_ring_funcs_compute;
3913 	}
3914 }
3915 
3916 static const struct amdgpu_irq_src_funcs gfx_v12_1_eop_irq_funcs = {
3917 	.set = gfx_v12_1_set_eop_interrupt_state,
3918 	.process = gfx_v12_1_eop_irq,
3919 };
3920 
3921 static const struct amdgpu_irq_src_funcs gfx_v12_1_priv_reg_irq_funcs = {
3922 	.set = gfx_v12_1_set_priv_reg_fault_state,
3923 	.process = gfx_v12_1_priv_reg_irq,
3924 };
3925 
3926 static const struct amdgpu_irq_src_funcs gfx_v12_1_priv_inst_irq_funcs = {
3927 	.set = gfx_v12_1_set_priv_inst_fault_state,
3928 	.process = gfx_v12_1_priv_inst_irq,
3929 };
3930 
3931 static void gfx_v12_1_set_irq_funcs(struct amdgpu_device *adev)
3932 {
3933 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
3934 	adev->gfx.eop_irq.funcs = &gfx_v12_1_eop_irq_funcs;
3935 
3936 	adev->gfx.priv_reg_irq.num_types = 1;
3937 	adev->gfx.priv_reg_irq.funcs = &gfx_v12_1_priv_reg_irq_funcs;
3938 
3939 	adev->gfx.priv_inst_irq.num_types = 1;
3940 	adev->gfx.priv_inst_irq.funcs = &gfx_v12_1_priv_inst_irq_funcs;
3941 }
3942 
3943 static void gfx_v12_1_set_imu_funcs(struct amdgpu_device *adev)
3944 {
3945 	if (adev->flags & AMD_IS_APU)
3946 		adev->gfx.imu.mode = MISSION_MODE;
3947 	else
3948 		adev->gfx.imu.mode = DEBUG_MODE;
3949 	if (!amdgpu_sriov_vf(adev))
3950 		adev->gfx.imu.funcs = &gfx_v12_1_imu_funcs;
3951 }
3952 
3953 static void gfx_v12_1_set_rlc_funcs(struct amdgpu_device *adev)
3954 {
3955 	adev->gfx.rlc.funcs = &gfx_v12_1_rlc_funcs;
3956 }
3957 
3958 static void gfx_v12_1_set_mqd_funcs(struct amdgpu_device *adev)
3959 {
3960 	/* set compute eng mqd */
3961 	adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
3962 		sizeof(struct v12_1_compute_mqd);
3963 	adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
3964 		gfx_v12_1_compute_mqd_init;
3965 }
3966 
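/*
 * Program a user-requested CU disable mask for the currently selected
 * SE/SA via the inactive-WGP field of GC_USER_SHADER_ARRAY_CONFIG.
 */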
3967 static void gfx_v12_1_set_user_cu_inactive_bitmap_per_sh(struct amdgpu_device *adev,
3968 							  u32 bitmap, int xcc_id)
3969 {
3970 	u32 data;
3971 
3972 	if (!bitmap)
3973 		return;
3974 
3975 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
3976 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
3977 
3978 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
3979 }
3980 
3981 static u32 gfx_v12_1_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev,
3982 						 int xcc_id)
3983 {
3984 	u32 data, mask;
3985 
3986 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
3987 	data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);
3988 
3989 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
3990 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
3991 
3992 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
3993 
3994 	return (~data) & mask;
3995 }
3996 
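/*
 * Count the active CUs: for every XCC/SE/SA, apply the user disable
 * mask, read back the combined inactive-WGP bits and tally the CUs that
 * remain enabled into cu_info.
 */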
3997 static int gfx_v12_1_get_cu_info(struct amdgpu_device *adev,
3998 				 struct amdgpu_cu_info *cu_info)
3999 {
4000 	int i, j, k, counter, xcc_id, active_cu_number = 0;
4001 	u32 mask, bitmap;
4002 	unsigned int disable_masks[2 * 2];
4003 
4004 	if (!adev || !cu_info)
4005 		return -EINVAL;
4006 
4007 	if (adev->gfx.config.max_shader_engines > 2 ||
4008 	    adev->gfx.config.max_sh_per_se > 2) {
4009 		dev_err(adev->dev,
4010 			"Max SE (%d) or Max SA per SE (%d) is greater than expected\n",
4011 			adev->gfx.config.max_shader_engines,
4012 			adev->gfx.config.max_sh_per_se);
4013 		return -EINVAL;
4014 	}
4015 
4016 	amdgpu_gfx_parse_disable_cu(adev, disable_masks,
4017 				    adev->gfx.config.max_shader_engines,
4018 				    adev->gfx.config.max_sh_per_se);
4019 
4020 	mutex_lock(&adev->grbm_idx_mutex);
4021 	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
4022 		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
4023 			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
4024 				bitmap = i * adev->gfx.config.max_sh_per_se + j;
4025 				if (!((gfx_v12_1_get_sa_active_bitmap(adev, xcc_id) >> bitmap) & 1))
4026 					continue;
4027 				mask = 1;
4028 				counter = 0;
4029 				gfx_v12_1_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
4030 				gfx_v12_1_set_user_cu_inactive_bitmap_per_sh(
4031 					adev,
4032 					disable_masks[i * adev->gfx.config.max_sh_per_se + j],
4033 					xcc_id);
4034 				bitmap = gfx_v12_1_get_cu_active_bitmap_per_sh(adev, xcc_id);
4035 
4036 				cu_info->bitmap[xcc_id][i][j] = bitmap;
4037 
4038 				for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
4039 					if (bitmap & mask)
4040 						counter++;
4041 
4042 					mask <<= 1;
4043 				}
4044 				active_cu_number += counter;
4045 			}
4046 		}
4047 		gfx_v12_1_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, xcc_id);
4048 	}
4049 	mutex_unlock(&adev->grbm_idx_mutex);
4050 
4051 	cu_info->number = active_cu_number;
4052 	cu_info->simd_per_cu = NUM_SIMD_PER_CU_GFX12_1;
4053 	cu_info->lds_size = 320;
4054 
4055 	return 0;
4056 }
4057 
4058 const struct amdgpu_ip_block_version gfx_v12_1_ip_block = {
4059 	.type = AMD_IP_BLOCK_TYPE_GFX,
4060 	.major = 12,
4061 	.minor = 1,
4062 	.rev = 0,
4063 	.funcs = &gfx_v12_1_ip_funcs,
4064 };
4065 
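/*
 * Partition (XCP) resume: re-init constants for each XCC in inst_mask,
 * restart the RLC on bare metal, then bring the CP back up.
 */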
4066 static int gfx_v12_1_xcp_resume(void *handle, uint32_t inst_mask)
4067 {
4068 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4069 	uint32_t tmp_mask;
4070 	int i, r;
4071 
4072 	/* TODO : Initialize golden regs */
4073 	/* gfx_v12_1_init_golden_registers(adev); */
4074 
4075 	tmp_mask = inst_mask;
4076 	for_each_inst(i, tmp_mask)
4077 		gfx_v12_1_xcc_constants_init(adev, i);
4078 
4079 	if (!amdgpu_sriov_vf(adev)) {
4080 		tmp_mask = inst_mask;
4081 		for_each_inst(i, tmp_mask) {
4082 			r = gfx_v12_1_xcc_rlc_resume(adev, i);
4083 			if (r)
4084 				return r;
4085 		}
4086 	}
4087 
4088 	r = gfx_v12_1_xcc_cp_resume(adev, inst_mask);
4089 
4090 	return r;
4091 }
4092 
4093 static int gfx_v12_1_xcp_suspend(void *handle, uint32_t inst_mask)
4094 {
4095 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4096 	int i;
4097 
4098 	for_each_inst(i, inst_mask)
4099 		gfx_v12_1_xcc_fini(adev, i);
4100 
4101 	return 0;
4102 }
4103 
4104 struct amdgpu_xcp_ip_funcs gfx_v12_1_xcp_funcs = {
4105 	.suspend = &gfx_v12_1_xcp_suspend,
4106 	.resume = &gfx_v12_1_xcp_resume
4107 };
4108