/*
 * Copyright 2025 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "amdgpu_atomfirmware.h"
#include "imu_v12_1.h"
#include "soc_v1_0.h"
#include "gfx_v12_1_pkt.h"

#include "gc/gc_12_1_0_offset.h"
#include "gc/gc_12_1_0_sh_mask.h"
#include "soc24_enum.h"
#include "ivsrcid/gfx/irqsrcs_gfx_12_1_0.h"

#include "soc15.h"
#include "clearstate_gfx12.h"
#include "v12_structs.h"
#include "gfx_v12_1.h"
#include "mes_v12_1.h"

#define GFX12_MEC_HPD_SIZE	2048
#define NUM_SIMD_PER_CU_GFX12_1	4

#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L

#define regCP_HQD_EOP_CONTROL_DEFAULT                                             0x00000000
#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT                                     0x00000000
#define regCP_MQD_CONTROL_DEFAULT                                                 0x00000100
#define regCP_HQD_PQ_CONTROL_DEFAULT                                              0x00308509
#define regCP_HQD_PQ_RPTR_DEFAULT                                                 0x00000000
#define regCP_HQD_PERSISTENT_STATE_DEFAULT                                        0x0ae06301
#define regCP_HQD_IB_CONTROL_DEFAULT                                              0x00100000

MODULE_FIRMWARE("amdgpu/gc_12_1_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_12_1_0_rlc.bin");

#define SH_MEM_ALIGNMENT_MODE_UNALIGNED_GFX12_1_0	0x00000001
#define DEFAULT_SH_MEM_CONFIG \
	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED_GFX12_1_0 << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))

static void gfx_v12_1_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v12_1_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v12_1_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v12_1_set_rlc_funcs(struct amdgpu_device *adev);
static void gfx_v12_1_set_mqd_funcs(struct amdgpu_device *adev);
static void gfx_v12_1_set_imu_funcs(struct amdgpu_device *adev);
static int gfx_v12_1_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v12_1_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v12_1_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				       u32 sh_num, u32 instance, int xcc_id);
static void gfx_v12_1_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val);
static int gfx_v12_1_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v12_1_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel);
static void gfx_v12_1_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v12_1_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v12_1_update_perf_clk(struct amdgpu_device *adev,
				      bool enable);
static void gfx_v12_1_xcc_update_perf_clk(struct amdgpu_device *adev,
					  bool enable, int xcc_id);
static int gfx_v12_1_init_cp_compute_microcode_bo(struct amdgpu_device *adev);

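/*
 * KIQ (Kernel Interface Queue) helpers: the driver drives the CP's queue
 * scheduler by submitting PM4 packets on the KIQ ring. SET_RESOURCES hands
 * the KIQ the set of queue slots it is allowed to manage; queue_mask is a
 * bitmap of those slots, and the GWS/OAC masks are left zero here.
 */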
static void gfx_v12_1_kiq_set_resources(struct amdgpu_ring *kiq_ring,
					uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);
}

static void gfx_v12_1_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				     struct amdgpu_ring *ring)
{
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = ring->wptr_gpu_addr;
	uint32_t me = 0, eng_sel = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_COMPUTE:
		me = 1;
		eng_sel = 0;
		break;
	case AMDGPU_RING_TYPE_MES:
		me = 2;
		eng_sel = 5;
		break;
	default:
		WARN_ON(1);
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME(me) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

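/*
 * UNMAP_QUEUES tears a queue down again, identified by its doorbell
 * offset. With PREEMPT_QUEUES_NO_UNMAP the packet also carries a fence
 * address and sequence value so completion of the preemption can be
 * observed at @gpu_addr.
 */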
static void gfx_v12_1_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				       struct amdgpu_ring *ring,
				       enum amdgpu_unmap_queues_action action,
				       u64 gpu_addr, u64 seq)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr,
					      seq, kiq_ring->xcc_id);
		return;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx_v12_1_kiq_query_status(struct amdgpu_ring *kiq_ring,
				       struct amdgpu_ring *ring,
				       u64 addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v12_1_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
					  uint16_t pasid,
					  uint32_t flush_type,
					  bool all_hub)
{
	gfx_v12_1_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
}

static const struct kiq_pm4_funcs gfx_v12_1_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v12_1_kiq_set_resources,
	.kiq_map_queues = gfx_v12_1_kiq_map_queues,
	.kiq_unmap_queues = gfx_v12_1_kiq_unmap_queues,
	.kiq_query_status = gfx_v12_1_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v12_1_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v12_1_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		adev->gfx.kiq[i].pmf = &gfx_v12_1_kiq_pm4_funcs;
}

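/*
 * Emit a WAIT_REG_MEM packet: stall the selected engine until the dword
 * at @addr0/@addr1 (mem_space = 1) or the register at @addr0 (mem_space
 * = 0) satisfies (value & mask) == ref, re-polling every @inv intervals.
 * Function 3 selects the "equal" compare.
 */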
static void gfx_v12_1_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				   int mem_space, int opt, uint32_t addr0,
				   uint32_t addr1, uint32_t ref,
				   uint32_t mask, uint32_t inv)
{
	if (mem_space == 0) {
		addr0 = soc_v1_0_normalize_xcc_reg_offset(addr0);
		addr1 = soc_v1_0_normalize_xcc_reg_offset(addr1);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

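/*
 * Basic ring sanity test: seed SCRATCH_REG0 with 0xCAFEDEAD, submit a
 * packet that writes 0xDEADBEEF to it, and poll until the new value
 * shows up or the usec timeout expires. KIQ rings take the privileged
 * WREG emission path instead of SET_UCONFIG_REG.
 */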
static int gfx_v12_1_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch_reg0_offset, xcc_offset;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* Use register offset which is local to XCC in the packet */
	xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
	WREG32(scratch_reg0_offset, 0xCAFEDEAD);
	tmp = RREG32(scratch_reg0_offset);

	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		dev_err(adev->dev,
			"amdgpu: cp failed to lock ring %d (%d).\n",
			ring->idx, r);
		return r;
	}

	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
		gfx_v12_1_ring_emit_wreg(ring, xcc_offset, 0xDEADBEEF);
	} else {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
		amdgpu_ring_write(ring, xcc_offset -
				  PACKET3_SET_UCONFIG_REG_START);
		amdgpu_ring_write(ring, 0xDEADBEEF);
	}
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch_reg0_offset);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}

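/*
 * IB sanity test: build a tiny indirect buffer containing one WRITE_DATA
 * packet that stores 0xDEADBEEF to a writeback slot, schedule it, and
 * check the slot once the fence signals.
 */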
static int gfx_v12_1_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	uint64_t gpu_addr;
	volatile uint32_t *cpu_ptr;
	long r;

	/* MES KIQ fw doesn't support indirect buffers for now */
	if (adev->enable_mes_kiq &&
	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		return 0;

	memset(&ib, 0, sizeof(ib));

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	cpu_ptr = &adev->wb.wb[index];

	r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
	if (r) {
		dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;
err2:
	amdgpu_ib_free(&ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}

static void gfx_v12_1_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v12_1_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
{
	const struct psp_firmware_header_v1_0 *toc_hdr;
	int err = 0;

	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_toc.bin", ucode_prefix);
	if (err)
		goto out;

	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
			le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
	return 0;
out:
	amdgpu_ucode_release(&adev->psp.toc_fw);
	return err;
}

static int gfx_v12_1_init_microcode(struct amdgpu_device *adev)
{
	char ucode_prefix[15];
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

	if (!amdgpu_sriov_vf(adev)) {
		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
					   AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s_rlc.bin", ucode_prefix);
		if (err)
			goto out;
		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
		err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
		if (err)
			goto out;
	}

	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_mec.bin", ucode_prefix);
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		err = gfx_v12_1_init_toc_microcode(adev, ucode_prefix);

	/* only one MEC for gfx 12 */
	adev->gfx.mec2_fw = NULL;

	if (adev->gfx.imu.funcs) {
		if (adev->gfx.imu.funcs->init_microcode) {
			err = adev->gfx.imu.funcs->init_microcode(adev);
			if (err)
				dev_err(adev->dev, "Failed to load imu firmware!\n");
		}
	}

out:
	if (err) {
		amdgpu_ucode_release(&adev->gfx.rlc_fw);
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	}

	return err;
}

static u32 gfx_v12_1_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	count += 1;

	for (sect = gfx12_cs_data; sect->section != NULL; ++sect) {
		if (sect->id == SECT_CONTEXT) {
			for (ext = sect->section; ext->extent != NULL; ++ext)
				count += 2 + ext->reg_count;
		} else
			return 0;
	}

	return count;
}

static void gfx_v12_1_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
	u32 count = 0, clustercount = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	count += 1;

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		if (sect->id == SECT_CONTEXT) {
			for (ext = sect->section; ext->extent != NULL; ++ext) {
				clustercount++;
				buffer[count++] = ext->reg_count;
				buffer[count++] = ext->reg_index;

				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			}
		} else
			return;
	}

	buffer[0] = clustercount;
}

static void gfx_v12_1_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			&adev->gfx.rlc.clear_state_gpu_addr,
			(void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			&adev->gfx.rlc.cp_table_gpu_addr,
			(void **)&adev->gfx.rlc.cp_table_ptr);
}

static void gfx_v12_1_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	int xcc_id, num_xcc;
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];

		reg_access_ctrl->grbm_cntl =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
		reg_access_ctrl->grbm_idx =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);

		reg_access_ctrl->vfi_cmd =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_VFI_CMD);
		reg_access_ctrl->vfi_stat =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_VFI_STAT);
		reg_access_ctrl->vfi_addr =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_VFI_ADDR);
		reg_access_ctrl->vfi_data =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_VFI_DATA);
		reg_access_ctrl->vfi_grbm_cntl =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_VFI_GRBM_GFX_CNTL);
		reg_access_ctrl->vfi_grbm_idx =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_VFI_GRBM_GFX_INDEX);
		reg_access_ctrl->vfi_grbm_cntl_data = 0;
		reg_access_ctrl->vfi_grbm_idx_data = 0;
	}
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}

static int gfx_v12_1_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r, i, num_xcc;

	adev->gfx.rlc.cs_data = gfx12_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	/* init spm vmid with 0xf */
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		if (adev->gfx.rlc.funcs->update_spm_vmid)
			adev->gfx.rlc.funcs->update_spm_vmid(adev, i, NULL, 0xf);
	}

	return 0;
}

static void gfx_v12_1_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
}

static int gfx_v12_1_mec_init(struct amdgpu_device *adev)
{
	int r, i, num_xcc;
	u32 *hpd;
	size_t mec_hpd_size;

	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
			    AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings *
		       GFX12_MEC_HPD_SIZE * num_xcc;

	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v12_1_mec_fini(adev);
			return r;
		}

		memset(hpd, 0, mec_hpd_size);

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	return 0;
}

static uint32_t wave_read_ind(struct amdgpu_device *adev,
			      uint32_t xcc_id, uint32_t wave,
			      uint32_t address)
{
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev,
			   uint32_t xcc_id, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void gfx_v12_1_read_wave_data(struct amdgpu_device *adev,
				     uint32_t xcc_id,
				     uint32_t simd, uint32_t wave,
				     uint32_t *dst, int *no_fields)
{
	/* in gfx12 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh, so it should be
	 * zero here */
	WARN_ON(simd != 0);

	/* type 4 wave data */
	dst[(*no_fields)++] = 4;
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_MODE);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_STATE_PRIV);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXCP_FLAG_PRIV);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXCP_FLAG_USER);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_TRAP_CTRL);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_ACTIVE);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_VALID_AND_IDLE);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_DVGPR_ALLOC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_DVGPR_ALLOC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_SCHED_MODE);
}

static void gfx_v12_1_read_wave_sgprs(struct amdgpu_device *adev,
				      uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t start,
				      uint32_t size, uint32_t *dst)
{
	WARN_ON(simd != 0);

	wave_read_regs(adev, xcc_id, wave, 0,
		       start + SQIND_WAVE_SGPRS_OFFSET,
		       size, dst);
}

static void gfx_v12_1_read_wave_vgprs(struct amdgpu_device *adev,
				      uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, wave, thread,
		       start + SQIND_WAVE_VGPRS_OFFSET,
		       size, dst);
}

static void gfx_v12_1_select_me_pipe_q(struct amdgpu_device *adev,
				       u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	soc_v1_0_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
}

static int gfx_v12_1_get_xccs_per_xcp(struct amdgpu_device *adev)
{
	/* Fill this in when the interface is ready */
	return 1;
}

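/*
 * Map an IH node id back to a logical XCC. The decode below assumes the
 * packing implied by the formula: the low 3 bits carry the GC client id
 * within a node (GC clients appear to start at 2) and the upper bits the
 * node index, with four XCCs per node. The resulting physical XCC is then
 * matched against the logical-to-physical instance map.
 */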
static int gfx_v12_1_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
{
	int logic_xcc;
	int xcc = (ih_node & 0x7) - 2 + (ih_node >> 3) * 4;

	for (logic_xcc = 0; logic_xcc < NUM_XCC(adev->gfx.xcc_mask); logic_xcc++) {
		if (xcc == GET_INST(GC, logic_xcc))
			return logic_xcc;
	}

	dev_err(adev->dev, "Couldn't find xcc mapping from IH node\n");
	return -EINVAL;
}

static const struct amdgpu_gfx_funcs gfx_v12_1_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v12_1_get_gpu_clock_counter,
	.select_se_sh = &gfx_v12_1_xcc_select_se_sh,
	.read_wave_data = &gfx_v12_1_read_wave_data,
	.read_wave_sgprs = &gfx_v12_1_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v12_1_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v12_1_select_me_pipe_q,
	.update_perfmon_mgcg = &gfx_v12_1_update_perf_clk,
	.get_xccs_per_xcp = &gfx_v12_1_get_xccs_per_xcp,
	.ih_node_to_logical_xcc = &gfx_v12_1_ih_to_xcc_inst,
};

static int gfx_v12_1_gpu_early_init(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 1, 0):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	default:
		BUG();
		break;
	}

	return 0;
}

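/*
 * Doorbell layout: each XCC gets its own xcc_doorbell_range window
 * starting at mec_ring0, and the per-ring index is shifted left by one,
 * presumably because each doorbell is a 64-bit entry spanning two dword
 * slots (e.g. for xcc 1, ring 2: (mec_ring0 + range + 2) << 1).
 */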
static int gfx_v12_1_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				       int xcc_id, int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;
	uint32_t xcc_doorbell_start;

	ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
				       ring_id];

	/* mec0 is me1 */
	ring->xcc_id = xcc_id;
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
			     xcc_id * adev->doorbell_index.xcc_doorbell_range;
	ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
			     (ring_id + xcc_id * adev->gfx.num_compute_rings) *
			     GFX12_MEC_HPD_SIZE;
	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
	sprintf(ring->name, "comp_%d.%d.%d.%d",
			ring->xcc_id, ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     hw_prio, NULL);
	if (r)
		return r;

	return 0;
}

static struct {
	SOC24_FIRMWARE_ID	id;
	unsigned int		offset;
	unsigned int		size;
	unsigned int		size_x16;
	unsigned int		num_inst;
} rlc_autoload_info[SOC24_FIRMWARE_ID_MAX];

#define RLC_TOC_OFFSET_DWUNIT   8
#define RLC_SIZE_MULTIPLE       1024
#define RLC_TOC_UMF_SIZE_inM	23ULL
#define RLC_TOC_FORMAT_API	165ULL

#define RLC_NUM_INS_CODE0   1
#define RLC_NUM_INS_CODE1   8
#define RLC_NUM_INS_CODE2   2
#define RLC_NUM_INS_CODE3   16

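/*
 * Walk the RLC table of contents shipped in the TOC firmware. Offsets
 * are stored in 8-dword units and sizes in dwords (or in 1024-dword
 * multiples when size_x16 is set); vfflr_image_code selects how many
 * per-instance copies of the image the autoload buffer carries.
 */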
static void gfx_v12_1_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
{
	RLC_TABLE_OF_CONTENT_V2 *ucode = rlc_toc;

	while (ucode && (ucode->id > SOC24_FIRMWARE_ID_INVALID)) {
		rlc_autoload_info[ucode->id].id = ucode->id;
		rlc_autoload_info[ucode->id].offset =
			ucode->offset * RLC_TOC_OFFSET_DWUNIT * 4;
		rlc_autoload_info[ucode->id].size =
			ucode->size_x16 ? ucode->size * RLC_SIZE_MULTIPLE * 4 :
					  ucode->size * 4;
		switch (ucode->vfflr_image_code) {
		case 0:
			rlc_autoload_info[ucode->id].num_inst =
				RLC_NUM_INS_CODE0;
			break;
		case 1:
			rlc_autoload_info[ucode->id].num_inst =
				RLC_NUM_INS_CODE1;
			break;
		case 2:
			rlc_autoload_info[ucode->id].num_inst =
				RLC_NUM_INS_CODE2;
			break;
		case 3:
			rlc_autoload_info[ucode->id].num_inst =
				RLC_NUM_INS_CODE3;
			break;
		default:
			dev_err(adev->dev,
				"Invalid Instance number detected\n");
			break;
		}
		ucode++;
	}
}

static uint32_t gfx_v12_1_calc_toc_total_size(struct amdgpu_device *adev)
{
	uint32_t total_size = 0;
	SOC24_FIRMWARE_ID id;

	gfx_v12_1_parse_rlc_toc(adev, adev->psp.toc.start_addr);

	for (id = SOC24_FIRMWARE_ID_RLC_G_UCODE; id < SOC24_FIRMWARE_ID_MAX; id++)
		total_size += rlc_autoload_info[id].size;

	/* offsets in the rlc toc may be aligned upward, so make sure the
	 * buffer still covers the end of the last entry */
	if (total_size < rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].offset)
		total_size = rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].offset +
			rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].size;
	if (total_size < (RLC_TOC_UMF_SIZE_inM << 20))
		total_size = RLC_TOC_UMF_SIZE_inM << 20;

	return total_size;
}

static int gfx_v12_1_rlc_autoload_buffer_init(struct amdgpu_device *adev)
{
	int r;
	uint32_t total_size;

	total_size = gfx_v12_1_calc_toc_total_size(adev);

	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.rlc.rlc_autoload_bo,
				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);

	if (r) {
		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
		return r;
	}

	return 0;
}

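/*
 * Copy one firmware image into the autoload bo at its TOC offset,
 * replicating it once per instance slot and zero-padding each slot up
 * to the per-instance TOC size. The (1 << (i / 2)) check against
 * xcc_mask appears to skip slots belonging to absent XCCs, on the
 * assumption of two instance slots per XCC.
 */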
static void gfx_v12_1_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
						       SOC24_FIRMWARE_ID id,
						       const void *fw_data,
						       uint32_t fw_size)
{
	uint32_t toc_offset;
	uint32_t toc_fw_size, toc_fw_inst_size;
	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;
	int i, num_inst;

	if (id <= SOC24_FIRMWARE_ID_INVALID || id >= SOC24_FIRMWARE_ID_MAX)
		return;

	toc_offset = rlc_autoload_info[id].offset;
	toc_fw_size = rlc_autoload_info[id].size;
	num_inst = rlc_autoload_info[id].num_inst;
	toc_fw_inst_size = toc_fw_size / num_inst;

	if (fw_size == 0)
		fw_size = toc_fw_inst_size;

	if (fw_size > toc_fw_inst_size)
		fw_size = toc_fw_inst_size;

	for (i = 0; i < num_inst; i++) {
		if ((num_inst == RLC_NUM_INS_CODE0) ||
		    ((1 << (i / 2)) & adev->gfx.xcc_mask)) {
			memcpy(ptr + toc_offset + i * toc_fw_inst_size, fw_data, fw_size);

			if (fw_size < toc_fw_inst_size)
				memset(ptr + toc_offset + fw_size + i * toc_fw_inst_size,
				       0, toc_fw_inst_size - fw_size);
		}
	}
}

static void
gfx_v12_1_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev)
{
	void *data;
	uint32_t size;
	uint32_t *toc_ptr;

	data = adev->psp.toc.start_addr;
	size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_TOC].size;

	toc_ptr = (uint32_t *)data + size / 4 - 2;
	*toc_ptr = (RLC_TOC_FORMAT_API << 24) | 0x1;

	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_TOC,
						   data, size);
}

957 
958 static void
959 gfx_v12_1_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev)
960 {
961 	const __le32 *fw_data;
962 	uint32_t fw_size;
963 	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
964 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
965 	const struct rlc_firmware_header_v2_1 *rlcv21_hdr;
966 	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
967 	uint16_t version_major, version_minor;
968 
969 	/* mec ucode */
970 	cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
971 		adev->gfx.mec_fw->data;
972 	/* instruction */
973 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
974 		le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
975 	fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
976 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC,
977 						   fw_data, fw_size);
978 	/* data */
979 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
980 		le32_to_cpu(cpv2_hdr->data_offset_bytes));
981 	fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
982 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P0_STACK,
983 						   fw_data, fw_size);
984 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P1_STACK,
985 						   fw_data, fw_size);
986 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P2_STACK,
987 						   fw_data, fw_size);
988 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P3_STACK,
989 						   fw_data, fw_size);
990 
991 	/* rlc ucode */
992 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
993 		adev->gfx.rlc_fw->data;
994 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
995 			le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
996 	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
997 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_G_UCODE,
998 						   fw_data, fw_size);
999 
1000 	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1001 	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1002 	if (version_major == 2) {
1003 		if (version_minor >= 1) {
1004 			rlcv21_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
1005 
1006 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1007 					le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_offset_bytes));
1008 			fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_size_bytes);
1009 			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLCG_SCRATCH,
1010 						   fw_data, fw_size);
1011 
1012 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1013 					le32_to_cpu(rlcv21_hdr->save_restore_list_srm_offset_bytes));
1014 			fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_srm_size_bytes);
1015 			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_SRM_ARAM,
1016 						   fw_data, fw_size);
1017 		}
1018 		if (version_minor >= 2) {
1019 			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
1020 
1021 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1022 					le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
1023 			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
1024 			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_UCODE,
1025 						   fw_data, fw_size);
1026 
1027 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1028 					le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
1029 			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
1030 			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_DRAM_BOOT,
1031 						   fw_data, fw_size);
1032 		}
1033 	}
1034 }
1035 
static void
gfx_v12_1_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct sdma_firmware_header_v3_0 *sdma_hdr;

	if (adev->sdma.instance[0].fw) {
		sdma_hdr = (const struct sdma_firmware_header_v3_0 *)
			adev->sdma.instance[0].fw->data;
		fw_data = (const __le32 *)(adev->sdma.instance[0].fw->data +
				le32_to_cpu(sdma_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(sdma_hdr->ucode_size_bytes);

		gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_SDMA_UCODE_TH0,
							   fw_data, fw_size);
	}
}

static void
gfx_v12_1_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	unsigned fw_size;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	int pipe, ucode_id, data_id;

	for (pipe = 0; pipe < 2; pipe++) {
		if (pipe == 0) {
			ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P0;
			data_id  = SOC24_FIRMWARE_ID_RS64_MES_P0_STACK;
		} else {
			ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P1;
			data_id  = SOC24_FIRMWARE_ID_RS64_MES_P1_STACK;
		}

		mes_hdr = (const struct mes_firmware_header_v1_0 *)
			adev->mes.fw[pipe]->data;

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);

		gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, ucode_id, fw_data, fw_size);

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);

		gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, data_id, fw_data, fw_size);
	}
}

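/*
 * Backdoor autoload: once all images sit in the autoload bo (sequence 2),
 * point every XCC's IMU bootloader registers at the RLC_G image, let the
 * IMU load its own microcode (sequence 3), and finally unhalt both RLC
 * GPM threads so the RLC pulls in the remaining firmware itself.
 */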
static int gfx_v12_1_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
{
	uint32_t rlc_g_offset, rlc_g_size;
	uint64_t gpu_addr;
	uint32_t data;
	int i, num_xcc;

	/* RLC autoload sequence 2: copy ucode */
	gfx_v12_1_rlc_backdoor_autoload_copy_sdma_ucode(adev);
	gfx_v12_1_rlc_backdoor_autoload_copy_gfx_ucode(adev);
	gfx_v12_1_rlc_backdoor_autoload_copy_mes_ucode(adev);
	gfx_v12_1_rlc_backdoor_autoload_copy_toc_ucode(adev);

	rlc_g_offset = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].offset;
	rlc_g_size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].size;
	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset - adev->gmc.vram_start;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regGFX_IMU_RLC_BOOTLOADER_ADDR_HI,
			     upper_32_bits(gpu_addr));
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regGFX_IMU_RLC_BOOTLOADER_ADDR_LO,
			     lower_32_bits(gpu_addr));
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regGFX_IMU_RLC_BOOTLOADER_SIZE,
			     rlc_g_size);
	}

	if (adev->gfx.imu.funcs) {
		/* RLC autoload sequence 3: load IMU fw */
		if (adev->gfx.imu.funcs->load_microcode)
			adev->gfx.imu.funcs->load_microcode(adev);
	}

	/* unhalt rlc to start autoload */
	for (i = 0; i < num_xcc; i++) {
		data = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_THREAD_ENABLE);
		data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD0_ENABLE, 1);
		data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1);
		WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_THREAD_ENABLE, data);
		WREG32_SOC15(GC, GET_INST(GC, i), regRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
	}

	return 0;
}

static int gfx_v12_1_sw_init(struct amdgpu_ip_block *ip_block)
{
	int i, j, k, r, ring_id = 0;
	unsigned num_compute_rings;
	int xcc_id, num_xcc;
	struct amdgpu_device *adev = ip_block->adev;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 1, 0):
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	default:
		adev->gfx.mec.num_mec = 2;
		adev->gfx.mec.num_pipe_per_mec = 2;
		adev->gfx.mec.num_queue_per_pipe = 4;
		break;
	}

	/* recalculate compute rings to use based on hardware configuration */
	num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
			     adev->gfx.mec.num_queue_per_pipe) / 2;
	adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
					  num_compute_rings);

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_GRBM_CP,
			      GFX_12_1_0__SRCID__CP_EOP_INTERRUPT,
			      &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_GRBM_CP,
			      GFX_12_1_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_GRBM_CP,
			      GFX_12_1_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	r = gfx_v12_1_rlc_init(adev);
	if (r) {
		dev_err(adev->dev, "Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v12_1_mec_init(adev);
	if (r) {
		dev_err(adev->dev, "Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		ring_id = 0;
		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
					if (!amdgpu_gfx_is_mec_queue_enabled(adev,
								xcc_id, i, k, j))
						continue;

					r = gfx_v12_1_compute_ring_init(adev, ring_id,
								xcc_id, i, k, j);
					if (r)
						return r;

					ring_id++;
				}
			}
		}

		if (!adev->enable_mes_kiq) {
			r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, xcc_id);
			if (r) {
				dev_err(adev->dev, "Failed to init KIQ BOs!\n");
				return r;
			}

			r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
			if (r)
				return r;
		}

		r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v12_1_compute_mqd), xcc_id);
		if (r)
			return r;
	}

	/* allocate visible FB for rlc auto-loading fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		r = gfx_v12_1_rlc_autoload_buffer_init(adev);
		if (r)
			return r;
	} else if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		r = gfx_v12_1_init_cp_compute_microcode_bo(adev);
		if (r)
			return r;
	}

	r = gfx_v12_1_gpu_early_init(adev);
	if (r)
		return r;

	r = amdgpu_gfx_sysfs_init(adev);
	if (r)
		return r;

	return 0;
}

static void gfx_v12_1_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
			&adev->gfx.rlc.rlc_autoload_gpu_addr,
			(void **)&adev->gfx.rlc.rlc_autoload_ptr);
}

static int gfx_v12_1_sw_fini(struct amdgpu_ip_block *ip_block)
{
	int i, num_xcc;
	struct amdgpu_device *adev = ip_block->adev;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	for (i = 0; i < num_xcc; i++) {
		amdgpu_gfx_mqd_sw_fini(adev, i);

		if (!adev->enable_mes_kiq) {
			amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
			amdgpu_gfx_kiq_fini(adev, i);
		}
	}

	gfx_v12_1_rlc_fini(adev);
	gfx_v12_1_mec_fini(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		gfx_v12_1_rlc_autoload_buffer_fini(adev);

	gfx_v12_1_free_microcode(adev);
	amdgpu_gfx_sysfs_fini(adev);

	return 0;
}

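/*
 * Program GRBM_GFX_INDEX to steer subsequent register accesses at a
 * specific shader engine / shader array / instance of the given XCC;
 * 0xffffffff in any field selects broadcast writes for that level.
 */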
static void gfx_v12_1_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				       u32 sh_num, u32 instance, int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
				     instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);

	WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
}

static u32 gfx_v12_1_get_sa_active_bitmap(struct amdgpu_device *adev,
					  int xcc_id)
{
	u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;

	gc_disabled_sa_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SA_UNIT_DISABLE);
	gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
					    CC_GC_SA_UNIT_DISABLE,
					    SA_DISABLE);
	gc_user_disabled_sa_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SA_UNIT_DISABLE);
	gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
						 GC_USER_SA_UNIT_DISABLE,
						 SA_DISABLE);
	sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
					    adev->gfx.config.max_shader_engines);

	return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
}

static u32 gfx_v12_1_get_rb_active_bitmap(struct amdgpu_device *adev,
					  int xcc_id)
{
	u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
	u32 rb_mask;

	gc_disabled_rb_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
					   regCC_RB_BACKEND_DISABLE);
	gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
					    CC_RB_BACKEND_DISABLE,
					    BACKEND_DISABLE);
	gc_user_disabled_rb_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
						regGC_USER_RB_BACKEND_DISABLE);
	gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
						 GC_USER_RB_BACKEND_DISABLE,
						 BACKEND_DISABLE);
	rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
					    adev->gfx.config.max_shader_engines);

	return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
}

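/*
 * Derive the active RB mask from the harvested-SA mask. Note the
 * literal 0x3 assumes two render backends per shader array even though
 * rb_bitmap_width_per_sa is computed from the config; the per-SA result
 * is then merged with the RB_BACKEND_DISABLE-derived bitmap.
 */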
static void gfx_v12_1_setup_rb(struct amdgpu_device *adev)
{
	u32 rb_bitmap_width_per_sa;
	u32 max_sa;
	u32 active_sa_bitmap;
	u32 global_active_rb_bitmap;
	u32 active_rb_bitmap = 0;
	u32 i;
	int xcc_id;

	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
		/* query sa bitmap from SA_UNIT_DISABLE registers */
		active_sa_bitmap = gfx_v12_1_get_sa_active_bitmap(adev, xcc_id);
		/* query rb bitmap from RB_BACKEND_DISABLE registers */
		global_active_rb_bitmap = gfx_v12_1_get_rb_active_bitmap(adev, xcc_id);

		/* generate active rb bitmap according to active sa bitmap */
		max_sa = adev->gfx.config.max_shader_engines *
			 adev->gfx.config.max_sh_per_se;
		rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se;
		for (i = 0; i < max_sa; i++) {
			if (active_sa_bitmap & (1 << i))
				active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
		}

		active_rb_bitmap |= global_active_rb_bitmap;
	}

	adev->gfx.config.backend_enable_mask = active_rb_bitmap;
	adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}

static void gfx_v12_1_xcc_init_compute_vmid(struct amdgpu_device *adev,
					    int xcc_id)
{
	int i;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:         0x20000000'00000000 - 0x20000001'00000000 (4GB)
	 * Scratch:     0x10000000'00000000 - 0x10000001'00000000 (4GB)
	 */
	sh_mem_bases = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
				     (adev->gmc.private_aperture_start >> 58));
	sh_mem_bases = REG_SET_FIELD(sh_mem_bases, SH_MEM_BASES, SHARED_BASE,
				     (adev->gmc.shared_aperture_start >> 48));

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc_v1_0_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);

		/* Disable VGPR deallocation instruction for each KFD vmid. */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_DEBUG);
		data = REG_SET_FIELD(data, SQ_DEBUG, DISABLE_VGPR_DEALLOC, 1);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_DEBUG, data);
	}
	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
	mutex_unlock(&adev->srbm_mutex);
}

static void gfx_v12_1_tcp_harvest(struct amdgpu_device *adev)
{
	/* TODO: harvest feature to be added later. */
}

static void gfx_v12_1_get_tcc_info(struct amdgpu_device *adev)
{
}

static void gfx_v12_1_xcc_constants_init(struct amdgpu_device *adev,
					 int xcc_id)
{
	u32 tmp;
	int i;

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
		soc_v1_0_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
			     regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		if (i != 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
				(adev->gmc.private_aperture_start >> 58));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
				(adev->gmc.shared_aperture_start >> 48));
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, tmp);
		}
	}
	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));

	mutex_unlock(&adev->srbm_mutex);

	gfx_v12_1_xcc_init_compute_vmid(adev, xcc_id);
}

static void gfx_v12_1_constants_init(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	gfx_v12_1_setup_rb(adev);
	gfx_v12_1_get_cu_info(adev, &adev->gfx.cu_info);
	gfx_v12_1_get_tcc_info(adev);
	adev->gfx.config.pa_sc_tile_steering_override = 0;

	for (i = 0; i < num_xcc; i++)
		gfx_v12_1_xcc_constants_init(adev, i);
}

static void gfx_v12_1_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						    bool enable, int xcc_id)
{
	u32 tmp;

	if (amdgpu_sriov_vf(adev))
		return;

	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
			    enable ? 1 : 0);

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
}

1510 static int gfx_v12_1_xcc_init_csb(struct amdgpu_device *adev,
1511 				  int xcc_id)
1512 {
1513 	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
1514 
1515 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CSIB_ADDR_HI,
1516 			adev->gfx.rlc.clear_state_gpu_addr >> 32);
1517 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CSIB_ADDR_LO,
1518 			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
1519 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1520 		     regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
1521 
1522 	return 0;
1523 }
1524 
1525 static void gfx_v12_1_xcc_rlc_stop(struct amdgpu_device *adev,
1526 				   int xcc_id)
1527 {
1528 	u32 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CNTL);
1529 
1530 	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
1531 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CNTL, tmp);
1532 }
1533 
1534 static void gfx_v12_1_rlc_stop(struct amdgpu_device *adev)
1535 {
1536 	int i, num_xcc;
1537 
1538 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1539 	for (i = 0; i < num_xcc; i++)
1540 		gfx_v12_1_xcc_rlc_stop(adev, i);
1541 }
1542 
1543 static void gfx_v12_1_xcc_rlc_reset(struct amdgpu_device *adev,
1544 				    int xcc_id)
1545 {
1546 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id),
1547 			      GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
1548 	udelay(50);
1549 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id),
1550 			      GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
1551 	udelay(50);
1552 }
1553 
1554 static void gfx_v12_1_rlc_reset(struct amdgpu_device *adev)
1555 {
1556 	int i, num_xcc;
1557 
1558 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1559 	for (i = 0; i < num_xcc; i++)
1560 		gfx_v12_1_xcc_rlc_reset(adev, i);
1561 }
1562 
1563 static void gfx_v12_1_xcc_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
1564 						 bool enable, int xcc_id)
1565 {
1566 	uint32_t rlc_pg_cntl;
1567 
1568 	rlc_pg_cntl = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_PG_CNTL);
1569 
1570 	if (!enable) {
1571 		/* RLC_PG_CNTL[23] = 0 (default)
1572 		 * RLC will wait for handshake acks with SMU
1573 		 * GFXOFF will be enabled
1574 		 * RLC_PG_CNTL[23] = 1
1575 		 * RLC will not issue any message to SMU
1576 		 * hence no handshake between SMU & RLC
1577 		 * GFXOFF will be disabled
1578 		 */
1579 		rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
	} else {
		rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
	}
1582 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_PG_CNTL, rlc_pg_cntl);
1583 }
1584 
1585 static void gfx_v12_1_xcc_rlc_start(struct amdgpu_device *adev,
1586 				    int xcc_id)
1587 {
	/* TODO: keep the RLC/SMU handshake disabled until SMU
	 * and the gfxoff feature work as expected */
1590 	if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
1591 		gfx_v12_1_xcc_rlc_smu_handshake_cntl(adev, false, xcc_id);
1592 
1593 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL, RLC_ENABLE_F32, 1);
1594 	udelay(50);
1595 }
1596 
1597 static void gfx_v12_1_rlc_start(struct amdgpu_device *adev)
1598 {
1599 	int i, num_xcc;
1600 
1601 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v12_1_xcc_rlc_start(adev, i);
1605 }
1606 
1607 static void gfx_v12_1_xcc_rlc_enable_srm(struct amdgpu_device *adev,
1608 					 int xcc_id)
1609 {
1610 	uint32_t tmp;
1611 
1612 	/* enable Save Restore Machine */
1613 	tmp = RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SRM_CNTL));
1614 	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1615 	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
1616 	WREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SRM_CNTL), tmp);
1617 }
1618 
1619 static void gfx_v12_1_xcc_load_rlcg_microcode(struct amdgpu_device *adev,
1620 					      int xcc_id)
1621 {
1622 	const struct rlc_firmware_header_v2_0 *hdr;
1623 	const __le32 *fw_data;
1624 	unsigned i, fw_size;
1625 
1626 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1627 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1628 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1629 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1630 
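	/*
	 * Program the load offset once, then stream the ucode words through
	 * the DATA register, which auto-increments the address; writing the
	 * fw version to the ADDR register afterwards follows the convention
	 * of the other RLC load paths.
	 */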
1631 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
1632 		     RLCG_UCODE_LOADING_START_ADDRESS);
1633 
1634 	for (i = 0; i < fw_size; i++)
1635 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1636 			     regRLC_GPM_UCODE_DATA,
1637 			     le32_to_cpup(fw_data++));
1638 
1639 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1640 		     regRLC_GPM_UCODE_ADDR,
1641 		     adev->gfx.rlc_fw_version);
1642 }
1643 
1644 static void gfx_v12_1_xcc_load_rlc_iram_dram_microcode(struct amdgpu_device *adev,
1645 						       int xcc_id)
1646 {
1647 	const struct rlc_firmware_header_v2_2 *hdr;
1648 	const __le32 *fw_data;
1649 	unsigned i, fw_size;
1650 	u32 tmp;
1651 
1652 	hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
1653 
1654 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1655 			le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes));
1656 	fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4;
1657 
1658 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_LX6_IRAM_ADDR, 0);
1659 
1660 	for (i = 0; i < fw_size; i++) {
1661 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
1662 			msleep(1);
1663 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1664 			     regRLC_LX6_IRAM_DATA,
1665 			     le32_to_cpup(fw_data++));
1666 	}
1667 
1668 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1669 		     regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
1670 
1671 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1672 			le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes));
1673 	fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4;
1674 
1675 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1676 		     regRLC_LX6_DRAM_ADDR, 0);
1677 	for (i = 0; i < fw_size; i++) {
1678 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
1679 			msleep(1);
1680 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1681 			     regRLC_LX6_DRAM_DATA,
1682 			     le32_to_cpup(fw_data++));
1683 	}
1684 
1685 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1686 		     regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
1687 
1688 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_LX6_CNTL);
1689 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1);
1690 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0);
1691 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_LX6_CNTL, tmp);
1692 }
1693 
1694 static int gfx_v12_1_xcc_rlc_load_microcode(struct amdgpu_device *adev,
1695 					    int xcc_id)
1696 {
1697 	const struct rlc_firmware_header_v2_0 *hdr;
1698 	uint16_t version_major;
1699 	uint16_t version_minor;
1700 
1701 	if (!adev->gfx.rlc_fw)
1702 		return -EINVAL;
1703 
1704 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1705 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
1706 
1707 	version_major = le16_to_cpu(hdr->header.header_version_major);
1708 	version_minor = le16_to_cpu(hdr->header.header_version_minor);
1709 
1710 	if (version_major == 2) {
1711 		gfx_v12_1_xcc_load_rlcg_microcode(adev, xcc_id);
1712 		if (amdgpu_dpm == 1) {
1713 			if (version_minor >= 2)
1714 				gfx_v12_1_xcc_load_rlc_iram_dram_microcode(adev, xcc_id);
1715 		}
1716 
1717 		return 0;
1718 	}
1719 
1720 	return -EINVAL;
1721 }
1722 
1723 static int gfx_v12_1_xcc_rlc_resume(struct amdgpu_device *adev,
1724 				    int xcc_id)
1725 {
1726 	int r;
1727 
1728 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1729 		gfx_v12_1_xcc_init_csb(adev, xcc_id);
1730 
1731 		if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
1732 			gfx_v12_1_xcc_rlc_enable_srm(adev, xcc_id);
1733 	} else {
1734 		if (amdgpu_sriov_vf(adev)) {
1735 			gfx_v12_1_xcc_init_csb(adev, xcc_id);
1736 			return 0;
1737 		}
1738 
1739 		gfx_v12_1_xcc_rlc_stop(adev, xcc_id);
1740 
1741 		/* disable CG */
1742 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
1743 
1744 		/* disable PG */
1745 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_PG_CNTL, 0);
1746 
1747 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1748 			/* legacy rlc firmware loading */
1749 			r = gfx_v12_1_xcc_rlc_load_microcode(adev, xcc_id);
1750 			if (r)
1751 				return r;
1752 		}
1753 
1754 		gfx_v12_1_xcc_init_csb(adev, xcc_id);
1755 
1756 		gfx_v12_1_xcc_rlc_start(adev, xcc_id);
1757 	}
1758 
1759 	return 0;
1760 }
1761 
1762 static int gfx_v12_1_rlc_resume(struct amdgpu_device *adev)
1763 {
1764 	int r, i, num_xcc;
1765 
1766 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1767 	for (i = 0; i < num_xcc; i++) {
1768 		r = gfx_v12_1_xcc_rlc_resume(adev, i);
1769 		if (r)
1770 			return r;
1771 	}
1772 
1773 	return 0;
1774 }
1775 
1776 static void gfx_v12_1_xcc_config_gfx_rs64(struct amdgpu_device *adev,
1777 					  int xcc_id)
1778 {
1779 	const struct gfx_firmware_header_v2_0 *mec_hdr;
1780 	uint32_t pipe_id, tmp;
1781 
1782 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)
1783 		adev->gfx.mec_fw->data;
1784 
1785 	/* config mec program start addr */
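	/*
	 * The PRGRM_CNTR_START pair takes a dword address: the byte address
	 * is shifted right by two and split across the LO/HI registers.
	 */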
1786 	for (pipe_id = 0; pipe_id < 4; pipe_id++) {
1787 		soc_v1_0_grbm_select(adev, 1, pipe_id, 0, 0, GET_INST(GC, xcc_id));
1788 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START,
1789 					mec_hdr->ucode_start_addr_lo >> 2 |
1790 					mec_hdr->ucode_start_addr_hi << 30);
1791 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START_HI,
1792 					mec_hdr->ucode_start_addr_hi >> 2);
1793 	}
1794 	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1795 
1796 	/* reset mec pipe */
1797 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL);
1798 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
1799 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
1800 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
1801 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
1802 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL, tmp);
1803 
1804 	/* clear mec pipe reset */
1805 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
1806 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
1807 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
1808 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
1809 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL, tmp);
1810 }
1811 
1812 static void gfx_v12_1_config_gfx_rs64(struct amdgpu_device *adev)
1813 {
1814 	int i, num_xcc;
1815 
1816 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1817 
1818 	for (i = 0; i < num_xcc; i++)
1819 		gfx_v12_1_xcc_config_gfx_rs64(adev, i);
1820 }
1821 
1822 static void gfx_v12_1_xcc_set_mec_ucode_start_addr(struct amdgpu_device *adev,
1823 						   int xcc_id)
1824 {
1825 	const struct gfx_firmware_header_v2_0 *cp_hdr;
1826 	unsigned pipe_id;
1827 
1828 	cp_hdr = (const struct gfx_firmware_header_v2_0 *)
1829 		adev->gfx.mec_fw->data;
1830 	mutex_lock(&adev->srbm_mutex);
1831 	for (pipe_id = 0; pipe_id < adev->gfx.mec.num_pipe_per_mec; pipe_id++) {
1832 		soc_v1_0_grbm_select(adev, 1, pipe_id, 0, 0, GET_INST(GC, xcc_id));
1833 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START,
1834 			     cp_hdr->ucode_start_addr_lo >> 2 |
1835 			     cp_hdr->ucode_start_addr_hi << 30);
1836 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START_HI,
1837 			     cp_hdr->ucode_start_addr_hi >> 2);
1838 	}
1839 	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1840 	mutex_unlock(&adev->srbm_mutex);
1841 }
1842 
1843 static int gfx_v12_1_xcc_wait_for_rlc_autoload_complete(struct amdgpu_device *adev,
1844 							int xcc_id)
1845 {
1846 	uint32_t cp_status;
1847 	uint32_t bootload_status;
1848 	int i;
1849 
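	/*
	 * Poll until the CP is idle (CP_STAT == 0) and the RLC reports
	 * BOOTLOAD_COMPLETE; under emulation, back off with msleep() so
	 * the poll loop does not dominate the run.
	 */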
1850 	for (i = 0; i < adev->usec_timeout; i++) {
1851 		cp_status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_STAT);
1852 		bootload_status = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
1853 					       regRLC_RLCS_BOOTLOAD_STATUS);
1854 
1855 		if ((cp_status == 0) &&
1856 		    (REG_GET_FIELD(bootload_status,
1857 			RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
1858 			break;
1859 		}
1860 		udelay(1);
1861 		if (amdgpu_emu_mode)
1862 			msleep(10);
1863 	}
1864 
1865 	if (i >= adev->usec_timeout) {
1866 		dev_err(adev->dev,
1867 			"rlc autoload: xcc%d gc ucode autoload timeout\n", xcc_id);
1868 		return -ETIMEDOUT;
1869 	}
1870 
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		gfx_v12_1_xcc_set_mec_ucode_start_addr(adev, xcc_id);
1874 
1875 	return 0;
1876 }
1877 
1878 static int gfx_v12_1_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
1879 {
	int r, xcc_id;

	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
		r = gfx_v12_1_xcc_wait_for_rlc_autoload_complete(adev, xcc_id);
		if (r)
			return r;
	}

	return 0;
1886 }
1887 
1888 static void gfx_v12_1_xcc_cp_compute_enable(struct amdgpu_device *adev,
1889 					    bool enable, int xcc_id)
1890 {
1891 	u32 data;
1892 
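	/*
	 * On enable: drop the per-pipe resets, mark all four pipes active
	 * and release MEC_HALT.  On disable: the inverse, plus an icache
	 * invalidate so stale microcode is not executed on the next start.
	 */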
1893 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL);
1894 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE,
1895 						 enable ? 0 : 1);
1896 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET,
1897 						 enable ? 0 : 1);
1898 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET,
1899 						 enable ? 0 : 1);
1900 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET,
1901 						 enable ? 0 : 1);
1902 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET,
1903 						 enable ? 0 : 1);
1904 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE,
1905 						 enable ? 1 : 0);
1906 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE,
						 enable ? 1 : 0);
1908 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE,
1909 						 enable ? 1 : 0);
1910 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE,
1911 						 enable ? 1 : 0);
1912 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT,
1913 						 enable ? 0 : 1);
1914 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL, data);
1915 
1916 	adev->gfx.kiq[xcc_id].ring.sched.ready = enable;
1917 
1918 	udelay(50);
1919 }
1920 
1921 static int gfx_v12_1_init_cp_compute_microcode_bo(struct amdgpu_device *adev)
1922 {
1923 	const struct gfx_firmware_header_v2_0 *mec_hdr;
1924 	const __le32 *fw_ucode, *fw_data;
1925 	u32 fw_ucode_size, fw_data_size;
1926 	u32 *fw_ucode_ptr, *fw_data_ptr;
1927 	int i, r, xcc_id;
1928 
1929 	if (!adev->gfx.mec_fw)
1930 		return -EINVAL;
1931 
1932 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
1933 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
1934 
1935 	fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data +
1936 				le32_to_cpu(mec_hdr->ucode_offset_bytes));
1937 	fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes);
1938 
1939 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1940 				le32_to_cpu(mec_hdr->data_offset_bytes));
1941 	fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
1942 
1943 	if (adev->gfx.mec.mec_fw_obj == NULL) {
1944 		r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
1945 					      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
1946 					      &adev->gfx.mec.mec_fw_obj,
1947 					      &adev->gfx.mec.mec_fw_gpu_addr,
1948 					      (void **)&fw_ucode_ptr);
1949 		if (r) {
1950 			dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
1951 			gfx_v12_1_mec_fini(adev);
1952 			return r;
1953 		}
1954 
1955 		memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
1956 
1957 		amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
1958 		amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
1959 	}
1960 
1961 	if (adev->gfx.mec.mec_fw_data_obj == NULL) {
1962 		r = amdgpu_bo_create_reserved(adev,
1963 					      ALIGN(fw_data_size, 64 * 1024) *
1964 					      adev->gfx.mec.num_pipe_per_mec * NUM_XCC(adev->gfx.xcc_mask),
1965 					      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
1966 					      &adev->gfx.mec.mec_fw_data_obj,
1967 					      &adev->gfx.mec.mec_fw_data_gpu_addr,
1968 					      (void **)&fw_data_ptr);
1969 		if (r) {
1970 			dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r);
1971 			gfx_v12_1_mec_fini(adev);
1972 			return r;
1973 		}
1974 
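		/*
		 * Each pipe on every XCC gets its own 64K-aligned copy of
		 * the MEC data segment; CP_MEC_MDBASE_* is later pointed at
		 * the per-pipe copy when the microcode is loaded.
		 */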
1975 		for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
1976 			for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
1977 				u32 offset = (xcc_id * adev->gfx.mec.num_pipe_per_mec + i) *
1978 					     ALIGN(fw_data_size, 64 * 1024) / 4;
1979 				memcpy(fw_data_ptr + offset, fw_data, fw_data_size);
1980 			}
1981 		}
1982 
1983 		amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
1984 		amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);
1985 	}
1986 
1987 	return 0;
1988 }
1989 
1990 static int gfx_v12_1_xcc_cp_compute_load_microcode_rs64(struct amdgpu_device *adev,
1991 							int xcc_id)
1992 {
1993 	const struct gfx_firmware_header_v2_0 *mec_hdr;
1994 	u32 fw_data_size;
1995 	u32 tmp, i, usec_timeout = 50000; /* Wait for 50 ms */
1996 
1997 	if (!adev->gfx.mec_fw)
1998 		return -EINVAL;
1999 
2000 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
2001 	fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
2002 
2003 	gfx_v12_1_xcc_cp_compute_enable(adev, false, xcc_id);
2004 
2005 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL);
2006 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2007 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2008 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2009 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp);
2010 
2011 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_BASE_CNTL);
2012 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
2013 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
2014 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_BASE_CNTL, tmp);
2015 
2016 	mutex_lock(&adev->srbm_mutex);
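	/* point each pipe at its private data copy and the shared ucode BO */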
2017 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
2018 		soc_v1_0_grbm_select(adev, 1, i, 0, 0, GET_INST(GC, xcc_id));
2019 
2020 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_MDBASE_LO,
2021 			     lower_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr +
2022 					   (xcc_id * adev->gfx.mec.num_pipe_per_mec + i) *
2023 					   ALIGN(fw_data_size, 64 * 1024)));
2024 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_MDBASE_HI,
2025 			     upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr +
2026 					   (xcc_id * adev->gfx.mec.num_pipe_per_mec + i) *
2027 					   ALIGN(fw_data_size, 64 * 1024)));
2028 
2029 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO,
2030 				lower_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2031 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI,
2032 				upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2033 	}
	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
	mutex_unlock(&adev->srbm_mutex);
2036 
	/* Trigger an invalidation of the MEC L1 data cache */
2038 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_OP_CNTL);
2039 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2040 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_OP_CNTL, tmp);
2041 
2042 	/* Wait for invalidation complete */
2043 	for (i = 0; i < usec_timeout; i++) {
2044 		tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
				  INVALIDATE_DCACHE_COMPLETE) == 1)
2047 			break;
2048 		udelay(1);
2049 	}
2050 
2051 	if (i >= usec_timeout) {
2052 		dev_err(adev->dev, "failed to invalidate data cache\n");
2053 		return -EINVAL;
2054 	}
2055 
2056 	/* Trigger an invalidation of the L1 instruction caches */
2057 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_OP_CNTL);
2058 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2059 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_OP_CNTL, tmp);
2060 
2061 	/* Wait for invalidation complete */
2062 	for (i = 0; i < usec_timeout; i++) {
2063 		tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE) == 1)
2066 			break;
2067 		udelay(1);
2068 	}
2069 
2070 	if (i >= usec_timeout) {
2071 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2072 		return -EINVAL;
2073 	}
2074 
2075 	gfx_v12_1_xcc_set_mec_ucode_start_addr(adev, xcc_id);
2076 
2077 	return 0;
2078 }
2079 
2080 static void gfx_v12_1_xcc_kiq_setting(struct amdgpu_ring *ring,
2081 				      int xcc_id)
2082 {
2083 	uint32_t tmp;
2084 	struct amdgpu_device *adev = ring->adev;
2085 
2086 	/* tell RLC which is KIQ queue */
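	/*
	 * The low byte of RLC_CP_SCHEDULERS encodes the queue selection:
	 * queue in bits [2:0], pipe in bits [4:3], me in bit 5.  Bit 7 is
	 * set in a second write, which appears to latch the new selection.
	 */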
2087 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
2088 	tmp &= 0xffffff00;
2089 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2090 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
2091 	tmp |= 0x80;
2092 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
2093 }
2094 
2095 static void gfx_v12_1_xcc_cp_set_doorbell_range(struct amdgpu_device *adev,
2096 						int xcc_id)
2097 {
2098 	/* disable gfx engine doorbell range */
2099 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_DOORBELL_RANGE_LOWER, 0);
2100 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_DOORBELL_RANGE_UPPER, 0);
2101 
2102 	/* set compute engine doorbell range */
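	/*
	 * Doorbell indices are in 64-bit doorbell units; "* 2" converts to
	 * 32-bit dwords and "<< 2" to the byte offset the range registers
	 * expect (per the usual amdgpu doorbell layout).
	 */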
2103 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_LOWER,
2104 		     ((adev->doorbell_index.kiq +
2105 		       xcc_id * adev->doorbell_index.xcc_doorbell_range) *
2106 		      2) << 2);
2107 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_UPPER,
2108 		     ((adev->doorbell_index.userqueue_end +
2109 		       xcc_id * adev->doorbell_index.xcc_doorbell_range) *
2110 		      2) << 2);
2111 }
2112 
2113 static int gfx_v12_1_compute_mqd_init(struct amdgpu_device *adev, void *m,
2114 				      struct amdgpu_mqd_prop *prop)
2115 {
2116 	struct v12_1_compute_mqd *mqd = m;
2117 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
2118 	uint32_t tmp;
2119 
2120 	mqd->header = 0xC0310800;
2121 	mqd->compute_pipelinestat_enable = 0x00000001;
2122 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2123 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2124 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2125 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2126 	mqd->compute_misc_reserved = 0x00000007;
2127 
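	/* the HQD EOP base registers take a 256-byte aligned address, hence the >> 8 */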
2128 	eop_base_addr = prop->eop_gpu_addr >> 8;
2129 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2130 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
2131 
2132 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2133 	tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
2134 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
2135 			(order_base_2(GFX12_MEC_HPD_SIZE / 4) - 1));
2136 
2137 	mqd->cp_hqd_eop_control = tmp;
2138 
2139 	/* enable doorbell? */
2140 	tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
2141 
2142 	if (prop->use_doorbell) {
2143 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2144 				    DOORBELL_OFFSET, prop->doorbell_index);
2145 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2146 				    DOORBELL_EN, 1);
2147 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2148 				    DOORBELL_SOURCE, 0);
2149 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2150 				    DOORBELL_HIT, 0);
2151 	} else {
2152 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2153 				    DOORBELL_EN, 0);
2154 	}
2155 
2156 	mqd->cp_hqd_pq_doorbell_control = tmp;
2157 
2158 	/* disable the queue if it's active */
2159 	mqd->cp_hqd_dequeue_request = 0;
2160 	mqd->cp_hqd_pq_rptr = 0;
2161 	mqd->cp_hqd_pq_wptr_lo = 0;
2162 	mqd->cp_hqd_pq_wptr_hi = 0;
2163 
2164 	/* set the pointer to the MQD */
2165 	mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc;
2166 	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
2167 
2168 	/* set MQD vmid to 0 */
2169 	tmp = regCP_MQD_CONTROL_DEFAULT;
2170 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
2171 	mqd->cp_mqd_control = tmp;
2172 
	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2174 	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
2175 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2176 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2177 
2178 	/* set up the HQD, this is similar to CP_RB0_CNTL */
2179 	tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
2180 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
2181 			    (order_base_2(prop->queue_size / 4) - 1));
2182 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
2183 			    (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
2184 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
2185 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
2186 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
2187 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
2188 	mqd->cp_hqd_pq_control = tmp;
2189 
2190 	/* set the wb address whether it's enabled or not */
2191 	wb_gpu_addr = prop->rptr_gpu_addr;
2192 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2193 	mqd->cp_hqd_pq_rptr_report_addr_hi =
2194 		upper_32_bits(wb_gpu_addr) & 0xffff;
2195 
	/* only used if CP_PQ_WPTR_POLL_CNTL.EN is set */
2197 	wb_gpu_addr = prop->wptr_gpu_addr;
2198 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2199 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2200 
2201 	tmp = 0;
2202 	/* enable the doorbell if requested */
2203 	if (prop->use_doorbell) {
2204 		tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
2205 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2206 				DOORBELL_OFFSET, prop->doorbell_index);
2207 
2208 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2209 				    DOORBELL_EN, 1);
2210 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2211 				    DOORBELL_SOURCE, 0);
2212 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2213 				    DOORBELL_HIT, 0);
2214 	}
2215 
2216 	mqd->cp_hqd_pq_doorbell_control = tmp;
2217 
2218 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2219 	mqd->cp_hqd_pq_rptr = regCP_HQD_PQ_RPTR_DEFAULT;
2220 
2221 	/* set the vmid for the queue */
2222 	mqd->cp_hqd_vmid = 0;
2223 
2224 	tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
2225 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x63);
2226 	mqd->cp_hqd_persistent_state = tmp;
2227 
2228 	/* set MIN_IB_AVAIL_SIZE */
2229 	tmp = regCP_HQD_IB_CONTROL_DEFAULT;
2230 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 1);
2231 	mqd->cp_hqd_ib_control = tmp;
2232 
2233 	/* set static priority for a compute queue/ring */
2234 	mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority;
2235 	mqd->cp_hqd_queue_priority = prop->hqd_queue_priority;
2236 
2237 	mqd->cp_mqd_stride_size = prop->mqd_stride_size ? prop->mqd_stride_size :
2238 		sizeof(struct v12_1_compute_mqd);
2239 
2240 	mqd->cp_hqd_active = prop->hqd_active;
2241 
2242 	return 0;
2243 }
2244 
2245 static int gfx_v12_1_xcc_kiq_init_register(struct amdgpu_ring *ring,
2246 					   int xcc_id)
2247 {
2248 	struct amdgpu_device *adev = ring->adev;
2249 	struct v12_1_compute_mqd *mqd = ring->mqd_ptr;
2250 	int j;
2251 
2252 	/* inactivate the queue */
2253 	if (amdgpu_sriov_vf(adev))
2254 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
2255 
2256 	/* disable wptr polling */
2257 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
2258 
2259 	/* write the EOP addr */
2260 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
2261 	       mqd->cp_hqd_eop_base_addr_lo);
2262 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
2263 	       mqd->cp_hqd_eop_base_addr_hi);
2264 
2265 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2266 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
2267 	       mqd->cp_hqd_eop_control);
2268 
2269 	/* enable doorbell? */
2270 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
2271 	       mqd->cp_hqd_pq_doorbell_control);
2272 
2273 	/* disable the queue if it's active */
2274 	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
2275 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
2276 		for (j = 0; j < adev->usec_timeout; j++) {
2277 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
2278 				break;
2279 			udelay(1);
2280 		}
2281 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
2282 		       mqd->cp_hqd_dequeue_request);
2283 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
2284 		       mqd->cp_hqd_pq_rptr);
2285 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
2286 		       mqd->cp_hqd_pq_wptr_lo);
2287 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
2288 		       mqd->cp_hqd_pq_wptr_hi);
2289 	}
2290 
2291 	/* set the pointer to the MQD */
2292 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
2293 	       mqd->cp_mqd_base_addr_lo);
2294 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
2295 	       mqd->cp_mqd_base_addr_hi);
2296 
2297 	/* set MQD vmid to 0 */
2298 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
2299 	       mqd->cp_mqd_control);
2300 
	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2302 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
2303 	       mqd->cp_hqd_pq_base_lo);
2304 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
2305 	       mqd->cp_hqd_pq_base_hi);
2306 
2307 	/* set up the HQD, this is similar to CP_RB0_CNTL */
2308 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
2309 	       mqd->cp_hqd_pq_control);
2310 
2311 	/* set the wb address whether it's enabled or not */
2312 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR,
2313 		mqd->cp_hqd_pq_rptr_report_addr_lo);
2314 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
2315 		mqd->cp_hqd_pq_rptr_report_addr_hi);
2316 
	/* only used if CP_PQ_WPTR_POLL_CNTL.EN is set */
2318 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR,
2319 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
2320 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
2321 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
2322 
2323 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
2324 	       mqd->cp_hqd_pq_doorbell_control);
2325 
2326 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2327 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
2328 	       mqd->cp_hqd_pq_wptr_lo);
2329 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
2330 	       mqd->cp_hqd_pq_wptr_hi);
2331 
2332 	/* set the vmid for the queue */
2333 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid);
2334 
2335 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE,
2336 	       mqd->cp_hqd_persistent_state);
2337 
2338 	/* activate the queue */
2339 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE,
2340 	       mqd->cp_hqd_active);
2341 
2342 	if (ring->use_doorbell)
2343 		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2344 
2345 	return 0;
2346 }
2347 
2348 static int gfx_v12_1_xcc_kiq_init_queue(struct amdgpu_ring *ring,
2349 					int xcc_id)
2350 {
2351 	struct amdgpu_device *adev = ring->adev;
2352 	struct v12_1_compute_mqd *mqd = ring->mqd_ptr;
2353 
2354 	gfx_v12_1_xcc_kiq_setting(ring, xcc_id);
2355 
2356 	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
2357 		/* reset MQD to a clean status */
2358 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2359 			memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(*mqd));
2360 
2361 		/* reset ring buffer */
2362 		ring->wptr = 0;
2363 		amdgpu_ring_clear_ring(ring);
2364 
2365 		mutex_lock(&adev->srbm_mutex);
2366 		soc_v1_0_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2367 		gfx_v12_1_xcc_kiq_init_register(ring, xcc_id);
2368 		soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2369 		mutex_unlock(&adev->srbm_mutex);
2370 	} else {
2371 		memset((void *)mqd, 0, sizeof(*mqd));
2372 		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
2373 			amdgpu_ring_clear_ring(ring);
2374 		mutex_lock(&adev->srbm_mutex);
2375 		soc_v1_0_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2376 		amdgpu_ring_init_mqd(ring);
2377 		gfx_v12_1_xcc_kiq_init_register(ring, xcc_id);
2378 		soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2379 		mutex_unlock(&adev->srbm_mutex);
2380 
2381 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2382 			memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(*mqd));
2383 	}
2384 
2385 	return 0;
2386 }
2387 
2388 static int gfx_v12_1_xcc_kcq_init_queue(struct amdgpu_ring *ring,
2389 					int xcc_id)
2390 {
2391 	struct amdgpu_device *adev = ring->adev;
2392 	struct v12_1_compute_mqd *mqd = ring->mqd_ptr;
2393 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
2394 
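	/*
	 * First-time init builds the MQD from scratch and backs it up;
	 * on reset/resume the backup is restored and the ring state reset.
	 */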
2395 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2396 		memset((void *)mqd, 0, sizeof(*mqd));
2397 		mutex_lock(&adev->srbm_mutex);
2398 		soc_v1_0_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2399 		amdgpu_ring_init_mqd(ring);
2400 		soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2401 		mutex_unlock(&adev->srbm_mutex);
2402 
2403 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2404 			memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
2405 	} else {
2406 		/* restore MQD to a clean status */
2407 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2408 			memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
2409 		/* reset ring buffer */
2410 		ring->wptr = 0;
2411 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
2412 		amdgpu_ring_clear_ring(ring);
2413 	}
2414 
2415 	return 0;
2416 }
2417 
2418 static int gfx_v12_1_xcc_kiq_resume(struct amdgpu_device *adev,
2419 				    int xcc_id)
2420 {
2421 	struct amdgpu_ring *ring;
2422 	int r;
2423 
2424 	ring = &adev->gfx.kiq[xcc_id].ring;
2425 
2426 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
2427 	if (unlikely(r != 0))
2428 		return r;
2429 
2430 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2431 	if (unlikely(r != 0)) {
2432 		amdgpu_bo_unreserve(ring->mqd_obj);
2433 		return r;
2434 	}
2435 
2436 	gfx_v12_1_xcc_kiq_init_queue(ring, xcc_id);
2437 	amdgpu_bo_kunmap(ring->mqd_obj);
2438 	ring->mqd_ptr = NULL;
2439 	amdgpu_bo_unreserve(ring->mqd_obj);
2440 	ring->sched.ready = true;
2441 	return 0;
2442 }
2443 
2444 static int gfx_v12_1_xcc_kcq_resume(struct amdgpu_device *adev,
2445 				    int xcc_id)
2446 {
2447 	struct amdgpu_ring *ring = NULL;
2448 	int r = 0, i;
2449 
2450 	if (!amdgpu_async_gfx_ring)
2451 		gfx_v12_1_xcc_cp_compute_enable(adev, true, xcc_id);
2452 
2453 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2454 		ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
2455 
2456 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
2457 		if (unlikely(r != 0))
2458 			goto done;
2459 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2460 		if (!r) {
2461 			r = gfx_v12_1_xcc_kcq_init_queue(ring, xcc_id);
2462 			amdgpu_bo_kunmap(ring->mqd_obj);
2463 			ring->mqd_ptr = NULL;
2464 		}
2465 		amdgpu_bo_unreserve(ring->mqd_obj);
2466 		if (r)
2467 			goto done;
2468 	}
2469 
2470 	r = amdgpu_gfx_enable_kcq(adev, xcc_id);
2471 done:
2472 	return r;
2473 }
2474 
2475 static int gfx_v12_1_xcc_cp_resume(struct amdgpu_device *adev, uint16_t xcc_mask)
2476 {
2477 	int r, i, xcc_id;
2478 	struct amdgpu_ring *ring;
2479 
2480 	for_each_inst(xcc_id, xcc_mask) {
2481 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
2482 			/* legacy firmware loading */
2483 			r = gfx_v12_1_xcc_cp_compute_load_microcode_rs64(adev, xcc_id);
2484 			if (r)
2485 				return r;
2486 		}
2487 
		/* GFX CGCG and CGLS are set by default */
2489 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2490 			gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
2491 
2492 		gfx_v12_1_xcc_cp_set_doorbell_range(adev, xcc_id);
2493 
2494 		gfx_v12_1_xcc_cp_compute_enable(adev, true, xcc_id);
2495 
2496 		if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
2497 			r = amdgpu_mes_kiq_hw_init(adev, xcc_id);
2498 		else
2499 			r = gfx_v12_1_xcc_kiq_resume(adev, xcc_id);
2500 		if (r)
2501 			return r;
2502 
2503 		r = gfx_v12_1_xcc_kcq_resume(adev, xcc_id);
2504 		if (r)
2505 			return r;
2506 
2507 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2508 			ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
2509 			r = amdgpu_ring_test_helper(ring);
2510 			if (r)
2511 				return r;
2512 		}
2513 	}
2514 
2515 	return 0;
2516 }
2517 
2518 static int gfx_v12_1_cp_resume(struct amdgpu_device *adev)
2519 {
2520 	int num_xcc, num_xcp, num_xcc_per_xcp;
2521 	uint16_t xcc_mask;
2522 	int r = 0;
2523 
2524 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2525 	if (amdgpu_sriov_vf(adev)) {
2526 		enum amdgpu_gfx_partition mode;
2527 
2528 		mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2529 						       AMDGPU_XCP_FL_NONE);
2530 		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2531 			return -EINVAL;
2532 		if (adev->gfx.funcs &&
2533 		    adev->gfx.funcs->get_xccs_per_xcp) {
2534 			num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
2535 			adev->gfx.num_xcc_per_xcp = num_xcc_per_xcp;
2536 			num_xcp = num_xcc / num_xcc_per_xcp;
2537 		} else {
2538 			return -EINVAL;
2539 		}
2540 		r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);
2541 
2542 	} else {
2543 		if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2544 						    AMDGPU_XCP_FL_NONE) ==
2545 		    AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2546 			r = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr,
2547 							     amdgpu_user_partt_mode);
2548 	}
2549 
2550 	if (r)
2551 		return r;
2552 
2553 	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
2554 
2555 	return gfx_v12_1_xcc_cp_resume(adev, xcc_mask);
2556 }
2557 
2558 static int gfx_v12_1_gfxhub_enable(struct amdgpu_device *adev)
2559 {
2560 	int r, i;
2561 	bool value;
2562 
2563 	r = adev->gfxhub.funcs->gart_enable(adev);
2564 	if (r)
2565 		return r;
2566 
	value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
2569 
2570 	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
2571 	/* TODO investigate why TLB flush is needed,
2572 	 * are we missing a flush somewhere else? */
2573 	for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
2574 		if (AMDGPU_IS_GFXHUB(i))
2575 			adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(i), 0);
2576 	}
2577 
2578 	return 0;
2579 }
2580 
2581 static int get_gb_addr_config(struct amdgpu_device *adev)
2582 {
2583 	u32 gb_addr_config;
2584 
2585 	gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG_READ);
2586 	if (gb_addr_config == 0)
2587 		return -EINVAL;
2588 
2589 	adev->gfx.config.gb_addr_config_fields.num_pkrs =
2590 		1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG_READ, NUM_PKRS);
2591 
2592 	adev->gfx.config.gb_addr_config = gb_addr_config;
2593 
2594 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
2595 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2596 				      GB_ADDR_CONFIG_READ, NUM_PIPES);
2597 
2598 	adev->gfx.config.max_tile_pipes =
2599 		adev->gfx.config.gb_addr_config_fields.num_pipes;
2600 
2601 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
2602 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2603 				      GB_ADDR_CONFIG_READ, MAX_COMPRESSED_FRAGS);
2604 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
2605 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2606 				      GB_ADDR_CONFIG_READ, NUM_RB_PER_SE);
2607 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
2608 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2609 				      GB_ADDR_CONFIG_READ, NUM_SHADER_ENGINES);
2610 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
2611 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2612 				      GB_ADDR_CONFIG_READ, PIPE_INTERLEAVE_SIZE));
2613 
2614 	return 0;
2615 }
2616 
2617 static void gfx_v12_1_xcc_disable_gpa_mode(struct amdgpu_device *adev,
2618 					   int xcc_id)
2619 {
2620 	uint32_t data;
2621 
2622 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
2623 	data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
2624 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
2625 
2626 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPG_PSP_DEBUG);
2627 	data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK;
2628 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPG_PSP_DEBUG, data);
2629 }
2630 
2631 static void gfx_v12_1_xcc_setup_tcp_thrashing_ctrl(struct amdgpu_device *adev,
2632 					 int xcc_id)
2633 {
2634 	uint32_t val;
2635 
	/* Configure TCP UTCL0 thrashing detection and the retry thresholds */
2637 	val = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2638 					regTCP_UTCL0_THRASHING_CTRL);
2639 	val = REG_SET_FIELD(val, TCP_UTCL0_THRASHING_CTRL, THRASHING_EN, 0x2);
2640 	val = REG_SET_FIELD(val, TCP_UTCL0_THRASHING_CTRL,
2641 					RETRY_FRAGMENT_THRESHOLD_UP_EN, 0x1);
2642 	val = REG_SET_FIELD(val, TCP_UTCL0_THRASHING_CTRL,
2643 					RETRY_FRAGMENT_THRESHOLD_DOWN_EN, 0x1);
2644 
2645 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2646 					regTCP_UTCL0_THRASHING_CTRL, val);
2647 }
2648 
2649 static void gfx_v12_1_xcc_enable_atomics(struct amdgpu_device *adev,
2650 					 int xcc_id)
2651 {
2652 	uint32_t data;
2653 
2654 	/* Set the TCP UTCL0 register to enable atomics */
2655 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_UTCL0_CNTL1);
2656 	data = REG_SET_FIELD(data, TCP_UTCL0_CNTL1, ATOMIC_REQUESTER_EN, 0x1);
2657 
2658 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_UTCL0_CNTL1, data);
2659 }
2660 
2661 static void gfx_v12_1_xcc_disable_burst(struct amdgpu_device *adev,
2662 					int xcc_id)
2663 {
2664 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGL1_DRAM_BURST_CTRL, 0xf);
2665 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGLARB_DRAM_BURST_CTRL, 0xf);
2666 }
2667 
2668 static void gfx_v12_1_xcc_disable_early_write_ack(struct amdgpu_device *adev,
2669 					int xcc_id)
2670 {
2671 	uint32_t data;
2672 
2673 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_CNTL3);
2674 	data = REG_SET_FIELD(data, TCP_CNTL3, DISABLE_EARLY_WRITE_ACK, 0x1);
2675 
2676 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_CNTL3, data);
2677 }
2678 
2679 static void gfx_v12_1_xcc_disable_tcp_spill_cache(struct amdgpu_device *adev,
2680 					int xcc_id)
2681 {
2682 	uint32_t data;
2683 
2684 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_CNTL);
2685 	data = REG_SET_FIELD(data, TCP_CNTL, TCP_SPILL_CACHE_DISABLE, 0x1);
2686 
2687 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_CNTL, data);
2688 }
2689 
2690 static void gfx_v12_1_init_golden_registers(struct amdgpu_device *adev)
2691 {
2692 	int i;
2693 
2694 	for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); i++) {
2695 		gfx_v12_1_xcc_disable_burst(adev, i);
2696 		gfx_v12_1_xcc_enable_atomics(adev, i);
2697 		gfx_v12_1_xcc_setup_tcp_thrashing_ctrl(adev, i);
2698 		gfx_v12_1_xcc_disable_early_write_ack(adev, i);
2699 		gfx_v12_1_xcc_disable_tcp_spill_cache(adev, i);
2700 	}
2701 }
2702 
2703 static int gfx_v12_1_hw_init(struct amdgpu_ip_block *ip_block)
2704 {
2705 	int r, i, num_xcc;
2706 	struct amdgpu_device *adev = ip_block->adev;
2707 
2708 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
2709 		/* rlc autoload firmware */
2710 		r = gfx_v12_1_rlc_backdoor_autoload_enable(adev);
2711 		if (r)
2712 			return r;
2713 	} else {
2714 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
2715 			num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2716 
2717 			if (adev->gfx.imu.funcs) {
2718 				if (adev->gfx.imu.funcs->load_microcode)
2719 					adev->gfx.imu.funcs->load_microcode(adev);
2720 			}
2721 
2722 			for (i = 0; i < num_xcc; i++) {
2723 				/* disable gpa mode in backdoor loading */
2724 				gfx_v12_1_xcc_disable_gpa_mode(adev, i);
2725 			}
2726 		}
2727 	}
2728 
2729 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
2730 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2731 		r = gfx_v12_1_wait_for_rlc_autoload_complete(adev);
2732 		if (r) {
2733 			dev_err(adev->dev, "(%d) failed to wait rlc autoload complete\n", r);
2734 			return r;
2735 		}
2736 	}
2737 
2738 	adev->gfx.is_poweron = true;
2739 
2740 	if (get_gb_addr_config(adev))
		DRM_WARN("Invalid gb_addr_config!\n");
2742 
2743 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2744 		gfx_v12_1_config_gfx_rs64(adev);
2745 
2746 	r = gfx_v12_1_gfxhub_enable(adev);
2747 	if (r)
2748 		return r;
2749 
2750 	gfx_v12_1_init_golden_registers(adev);
2751 
2752 	gfx_v12_1_constants_init(adev);
2753 
2754 	if (adev->nbio.funcs->gc_doorbell_init)
2755 		adev->nbio.funcs->gc_doorbell_init(adev);
2756 
2757 	r = gfx_v12_1_rlc_resume(adev);
2758 	if (r)
2759 		return r;
2760 
2761 	/*
2762 	 * init golden registers and rlc resume may override some registers,
2763 	 * reconfig them here
2764 	 */
2765 	gfx_v12_1_tcp_harvest(adev);
2766 
2767 	r = gfx_v12_1_cp_resume(adev);
2768 	if (r)
2769 		return r;
2770 
2771 	return r;
2772 }
2773 
2774 static void gfx_v12_1_xcc_fini(struct amdgpu_device *adev,
2775 			      int xcc_id)
2776 {
2777 	uint32_t tmp;
2778 
2779 	if (!adev->no_hw_access) {
2780 		if (amdgpu_gfx_disable_kcq(adev, xcc_id))
2781 			DRM_ERROR("KCQ disable failed\n");
2782 
2783 		amdgpu_mes_kiq_hw_fini(adev, xcc_id);
2784 	}
2785 
2786 	if (amdgpu_sriov_vf(adev)) {
2787 		/* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
2788 		tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
2789 		tmp &= 0xffffff00;
2790 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
2791 	}
2792 	gfx_v12_1_xcc_cp_compute_enable(adev, false, xcc_id);
2793 	gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
2794 }
2795 
2796 static int gfx_v12_1_hw_fini(struct amdgpu_ip_block *ip_block)
2797 {
2798 	struct amdgpu_device *adev = ip_block->adev;
2799 	int i, num_xcc;
2800 
2801 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
2802 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
2803 
2804 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v12_1_xcc_fini(adev, i);
2808 
2809 	adev->gfxhub.funcs->gart_disable(adev);
2810 
2811 	adev->gfx.is_poweron = false;
2812 
2813 	return 0;
2814 }
2815 
2816 static int gfx_v12_1_suspend(struct amdgpu_ip_block *ip_block)
2817 {
2818 	return gfx_v12_1_hw_fini(ip_block);
2819 }
2820 
2821 static int gfx_v12_1_resume(struct amdgpu_ip_block *ip_block)
2822 {
2823 	return gfx_v12_1_hw_init(ip_block);
2824 }
2825 
2826 static bool gfx_v12_1_is_idle(struct amdgpu_ip_block *ip_block)
2827 {
2828 	struct amdgpu_device *adev = ip_block->adev;
2829 	int i, num_xcc;
2830 
2831 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2832 	for (i = 0; i < num_xcc; i++) {
2833 		if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i),
2834 				regGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE))
2835 			return false;
2836 	}
2837 	return true;
2838 }
2839 
2840 static int gfx_v12_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
2841 {
2842 	unsigned i;
2843 	struct amdgpu_device *adev = ip_block->adev;
2844 
2845 	for (i = 0; i < adev->usec_timeout; i++) {
2846 		if (gfx_v12_1_is_idle(ip_block))
2847 			return 0;
2848 		udelay(1);
2849 	}
2850 	return -ETIMEDOUT;
2851 }
2852 
2853 static uint64_t gfx_v12_1_get_gpu_clock_counter(struct amdgpu_device *adev)
2854 {
2855 	uint64_t clock = 0;
2856 
2857 	if (adev->smuio.funcs &&
2858 	    adev->smuio.funcs->get_gpu_clock_counter)
2859 		clock = adev->smuio.funcs->get_gpu_clock_counter(adev);
2860 	else
2861 		dev_warn(adev->dev, "query gpu clock counter is not supported\n");
2862 
2863 	return clock;
2864 }
2865 
2866 static int gfx_v12_1_early_init(struct amdgpu_ip_block *ip_block)
2867 {
2868 	struct amdgpu_device *adev = ip_block->adev;
2869 
2870 	adev->gfx.funcs = &gfx_v12_1_gfx_funcs;
2871 
2872 	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
2873 					  AMDGPU_MAX_COMPUTE_RINGS);
2874 
2875 	gfx_v12_1_set_kiq_pm4_funcs(adev);
2876 	gfx_v12_1_set_ring_funcs(adev);
2877 	gfx_v12_1_set_irq_funcs(adev);
2878 	gfx_v12_1_set_rlc_funcs(adev);
2879 	gfx_v12_1_set_mqd_funcs(adev);
2880 	gfx_v12_1_set_imu_funcs(adev);
2881 
2882 	gfx_v12_1_init_rlcg_reg_access_ctrl(adev);
2883 
2884 	return gfx_v12_1_init_microcode(adev);
2885 }
2886 
2887 static int gfx_v12_1_late_init(struct amdgpu_ip_block *ip_block)
2888 {
2889 	struct amdgpu_device *adev = ip_block->adev;
2890 	int r;
2891 
2892 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
2893 	if (r)
2894 		return r;
2895 
2896 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
2897 	if (r)
2898 		return r;
2899 
2900 	return 0;
2901 }
2902 
2903 static bool gfx_v12_1_is_rlc_enabled(struct amdgpu_device *adev)
2904 {
2905 	uint32_t rlc_cntl;
2906 
	/* report whether the RLC F32 core is currently running */
	rlc_cntl = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
	return REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32) ? true : false;
2910 }
2911 
2912 static void gfx_v12_1_xcc_set_safe_mode(struct amdgpu_device *adev,
2913 					int xcc_id)
2914 {
2915 	uint32_t data;
2916 	unsigned i;
2917 
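	/*
	 * Request safe-mode entry: CMD plus a non-zero MESSAGE asks the
	 * RLC to enter safe mode; the RLC acks by clearing the CMD bit.
	 */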
2918 	data = RLC_SAFE_MODE__CMD_MASK;
2919 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
2920 
2921 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
2922 
2923 	/* wait for RLC_SAFE_MODE */
2924 	for (i = 0; i < adev->usec_timeout; i++) {
2925 		if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2926 						regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
2927 			break;
2928 		udelay(1);
2929 	}
2930 }
2931 
2932 static void gfx_v12_1_xcc_unset_safe_mode(struct amdgpu_device *adev,
2933 					  int xcc_id)
2934 {
2935 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2936 		     regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK);
2937 }
2938 
2939 static void gfx_v12_1_update_perf_clk(struct amdgpu_device *adev,
2940 				      bool enable)
2941 {
2942 	int i, num_xcc;
2943 
2944 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2945 	for (i = 0; i < num_xcc; i++)
2946 		gfx_v12_1_xcc_update_perf_clk(adev, enable, i);
2947 }
2948 
2949 static void gfx_v12_1_update_spm_vmid(struct amdgpu_device *adev,
2950 				      int xcc_id,
2951 				      struct amdgpu_ring *ring,
2952 				      unsigned vmid)
2953 {
2954 	u32 reg, data;
2955 
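	/*
	 * With a single VF (pp_one_vf) the register can be accessed
	 * directly; otherwise the access goes through the usual KIQ path.
	 */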
2956 	reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL);
2957 	if (amdgpu_sriov_is_pp_one_vf(adev))
2958 		data = RREG32_NO_KIQ(reg);
2959 	else
2960 		data = RREG32(reg);
2961 
2962 	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
2963 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
2964 
2965 	if (amdgpu_sriov_is_pp_one_vf(adev))
2966 		WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL, data);
2967 	else
2968 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL, data);
2969 
	if (ring && amdgpu_sriov_is_pp_one_vf(adev) &&
	    (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
	     ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE))
		amdgpu_ring_emit_wreg(ring, reg, data);
2977 }
2978 
2979 static const struct amdgpu_rlc_funcs gfx_v12_1_rlc_funcs = {
2980 	.is_rlc_enabled = gfx_v12_1_is_rlc_enabled,
2981 	.set_safe_mode = gfx_v12_1_xcc_set_safe_mode,
2982 	.unset_safe_mode = gfx_v12_1_xcc_unset_safe_mode,
2983 	.init = gfx_v12_1_rlc_init,
2984 	.get_csb_size = gfx_v12_1_get_csb_size,
2985 	.get_csb_buffer = gfx_v12_1_get_csb_buffer,
2986 	.resume = gfx_v12_1_rlc_resume,
2987 	.stop = gfx_v12_1_rlc_stop,
2988 	.reset = gfx_v12_1_rlc_reset,
2989 	.start = gfx_v12_1_rlc_start,
2990 	.update_spm_vmid = gfx_v12_1_update_spm_vmid,
2991 };
2992 
2993 #if 0
2994 static void gfx_v12_cntl_power_gating(struct amdgpu_device *adev, bool enable)
2995 {
2996 	/* TODO */
2997 }
2998 
2999 static void gfx_v12_cntl_pg(struct amdgpu_device *adev, bool enable)
3000 {
3001 	/* TODO */
3002 }
3003 #endif
3004 
3005 static int gfx_v12_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
3006 					   enum amd_powergating_state state)
3007 {
3008 	struct amdgpu_device *adev = ip_block->adev;
3009 	bool enable = (state == AMD_PG_STATE_GATE);
3010 
3011 	if (amdgpu_sriov_vf(adev))
3012 		return 0;
3013 
3014 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
3015 	case IP_VERSION(12, 1, 0):
3016 		amdgpu_gfx_off_ctrl(adev, enable);
3017 		break;
3018 	default:
3019 		break;
3020 	}
3021 
3022 	return 0;
3023 }
3024 
3025 static void gfx_v12_1_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
3026 							   bool enable, int xcc_id)
3027 {
3028 	uint32_t def, data;
3029 
3030 	if (!(adev->cg_flags &
3031 	      (AMD_CG_SUPPORT_GFX_CGCG |
3032 	      AMD_CG_SUPPORT_GFX_CGLS |
3033 	      AMD_CG_SUPPORT_GFX_3D_CGCG |
3034 	      AMD_CG_SUPPORT_GFX_3D_CGLS)))
3035 		return;
3036 
3037 	if (enable) {
3038 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
3039 					  regRLC_CGTT_MGCG_OVERRIDE);
3040 
3041 		/* unset CGCG override */
3042 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
3043 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
3044 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3045 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3046 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG ||
3047 		    adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
3048 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
3049 
3050 		/* update CGCG override bits */
3051 		if (def != data)
3052 			WREG32_SOC15(GC, GET_INST(GC, xcc_id),
3053 				     regRLC_CGTT_MGCG_OVERRIDE, data);
3054 
3055 		/* enable cgcg FSM(0x0000363F) */
3056 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
3057 
3058 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
3059 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK;
3060 			data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3061 				 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
3062 		}
3063 
3064 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
3065 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK;
3066 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3067 				 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3068 		}
3069 
3070 		if (def != data)
3071 			WREG32_SOC15(GC, GET_INST(GC, xcc_id),
3072 				     regRLC_CGCG_CGLS_CTRL, data);
3073 
3074 		/* set IDLE_POLL_COUNT(0x00900100) */
3075 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
3076 
3077 		data &= ~CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK;
3078 		data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
3079 		data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3080 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3081 
3082 		if (def != data)
3083 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
3084 
3085 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL);
3086 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
3087 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
3088 		data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
3089 		data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
3090 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL, data);
3091 	} else {
3092 		/* Program RLC_CGCG_CGLS_CTRL */
3093 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
3094 
3095 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
3096 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
3097 
3098 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3099 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3100 
3101 		if (def != data)
3102 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
3103 	}
3104 }
3105 
3106 static void gfx_v12_1_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev,
3107 							   bool enable, int xcc_id)
3108 {
	uint32_t data, def;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)))
3111 		return;
3112 
3113 	/* It is disabled by HW by default */
3114 	if (enable) {
3115 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
3116 			/* 1 - RLC_CGTT_MGCG_OVERRIDE */
3117 			def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3118 
3119 			data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3120 				  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
3121 				  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
3122 
3123 			if (def != data)
3124 				WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3125 		}
3126 	} else {
3127 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
3128 			def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3129 
3130 			data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
3131 				 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3132 				 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
3133 
3134 			if (def != data)
3135 				WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3136 		}
3137 	}
3138 }
3139 
3140 static void gfx_v12_1_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
3141 					       bool enable, int xcc_id)
3142 {
3143 	uint32_t def, data;
3144 
3145 	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
3146 		return;
3147 
3148 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3149 
3150 	if (enable)
3151 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK |
3152 				  RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK);
3153 	else
3154 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK |
3155 				RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK;
3156 
3157 	if (def != data)
3158 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3159 }
3160 
3161 static void gfx_v12_1_xcc_update_sram_fgcg(struct amdgpu_device *adev,
3162 					   bool enable, int xcc_id)
3163 {
3164 	uint32_t def, data;
3165 
3166 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
3167 		return;
3168 
3169 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3170 
3171 	if (enable)
3172 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
3173 	else
3174 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
3175 
3176 	if (def != data)
3177 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3178 }
3179 
3180 static void gfx_v12_1_xcc_update_perf_clk(struct amdgpu_device *adev,
3181 					  bool enable, int xcc_id)
3182 {
3183 	uint32_t def, data;
3184 
3185 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK))
3186 		return;
3187 
3188 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3189 
3190 	if (enable)
3191 		data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
3192 	else
3193 		data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
3194 
3195 	if (def != data)
3196 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3197 }
3198 
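/*
 * Apply all supported clock-gating features on one XCC instance. The RLC is
 * held in safe mode while the CGCG/MGCG/FGCG controls are reprogrammed, and
 * the GUI idle interrupt is toggled along with the gating state.
 */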
3199 static int gfx_v12_1_xcc_update_gfx_clock_gating(struct amdgpu_device *adev,
3200 					     bool enable, int xcc_id)
3201 {
3202 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
3203 
3204 	gfx_v12_1_xcc_update_coarse_grain_clock_gating(adev, enable, xcc_id);
3205 
3206 	gfx_v12_1_xcc_update_medium_grain_clock_gating(adev, enable, xcc_id);
3207 
3208 	gfx_v12_1_xcc_update_repeater_fgcg(adev, enable, xcc_id);
3209 
3210 	gfx_v12_1_xcc_update_sram_fgcg(adev, enable, xcc_id);
3211 
3212 	gfx_v12_1_xcc_update_perf_clk(adev, enable, xcc_id);
3213 
3214 	if (adev->cg_flags &
3215 	    (AMD_CG_SUPPORT_GFX_MGCG |
3216 	     AMD_CG_SUPPORT_GFX_CGLS |
3217 	     AMD_CG_SUPPORT_GFX_CGCG |
3218 	     AMD_CG_SUPPORT_GFX_3D_CGCG |
3219 	     AMD_CG_SUPPORT_GFX_3D_CGLS))
3220 		gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, enable, xcc_id);
3221 
3222 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
3223 
3224 	return 0;
3225 }
3226 
3227 static int gfx_v12_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
3228 					   enum amd_clockgating_state state)
3229 {
3230 	struct amdgpu_device *adev = ip_block->adev;
3231 	int i, num_xcc;
3232 
3233 	if (amdgpu_sriov_vf(adev))
3234 		return 0;
3235 
3236 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3237 	switch (adev->ip_versions[GC_HWIP][0]) {
3238 	case IP_VERSION(12, 1, 0):
3239 		for (i = 0; i < num_xcc; i++)
3240 			gfx_v12_1_xcc_update_gfx_clock_gating(adev,
3241 				  state == AMD_CG_STATE_GATE, i);
3242 		break;
3243 	default:
3244 		break;
3245 	}
3246 
3247 	return 0;
3248 }
3249 
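/*
 * Report the clock-gating features that are currently active by reading the
 * override/enable bits back from the first XCC instance.
 */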
3250 static void gfx_v12_1_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
3251 {
3252 	struct amdgpu_device *adev = ip_block->adev;
3253 	int data;
3254 
3255 	/* AMD_CG_SUPPORT_GFX_MGCG */
3256 	data = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE);
3257 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
3258 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
3259 
3260 	/* AMD_CG_SUPPORT_REPEATER_FGCG */
3261 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK))
3262 		*flags |= AMD_CG_SUPPORT_REPEATER_FGCG;
3263 
3264 	/* AMD_CG_SUPPORT_GFX_FGCG */
3265 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK))
3266 		*flags |= AMD_CG_SUPPORT_GFX_FGCG;
3267 
3268 	/* AMD_CG_SUPPORT_GFX_PERF_CLK */
3269 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK))
3270 		*flags |= AMD_CG_SUPPORT_GFX_PERF_CLK;
3271 
3272 	/* AMD_CG_SUPPORT_GFX_CGCG */
3273 	data = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL);
3274 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
3275 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
3276 
3277 	/* AMD_CG_SUPPORT_GFX_CGLS */
3278 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
3279 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
3280 }
3281 
3282 static u64 gfx_v12_1_ring_get_rptr_compute(struct amdgpu_ring *ring)
3283 {
3284 	/* gfx12 hardware uses a 32-bit rptr */
3285 	return *(uint32_t *)ring->rptr_cpu_addr;
3286 }
3287 
3288 static u64 gfx_v12_1_ring_get_wptr_compute(struct amdgpu_ring *ring)
3289 {
3290 	u64 wptr;
3291 
3292 	/* XXX check if swapping is necessary on BE */
3293 	if (ring->use_doorbell)
3294 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
3295 	else
3296 		BUG();
3297 	return wptr;
3298 }
3299 
3300 static void gfx_v12_1_ring_set_wptr_compute(struct amdgpu_ring *ring)
3301 {
3302 	struct amdgpu_device *adev = ring->adev;
3303 
3304 	/* XXX check if swapping is necessary on BE */
3305 	if (ring->use_doorbell) {
3306 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
3307 			     ring->wptr);
3308 		WDOORBELL64(ring->doorbell_index, ring->wptr);
3309 	} else {
3310 		BUG(); /* only DOORBELL method supported on gfx12 now */
3311 	}
3312 }
3313 
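/*
 * Emit a compute IB as a 4-dword INDIRECT_BUFFER packet: header, IB address
 * (lo/hi, with a swap field on big-endian builds) and a control word that
 * carries the VMID and the IB length in dwords.
 */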
3314 static void gfx_v12_1_ring_emit_ib_compute(struct amdgpu_ring *ring,
3315 					   struct amdgpu_job *job,
3316 					   struct amdgpu_ib *ib,
3317 					   uint32_t flags)
3318 {
3319 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
3320 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
3321 
3322 	/* Currently, there is a high possibility to get wave ID mismatch
3323 	 * between ME and GDS, leading to a hw deadlock, because ME generates
3324 	 * different wave IDs than the GDS expects. This situation happens
3325 	 * randomly when at least 5 compute pipes use GDS ordered append.
3326 	 * The wave IDs generated by ME are also wrong after suspend/resume.
3327 	 * Those are probably bugs somewhere else in the kernel driver.
3328 	 *
3329 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
3330 	 * GDS to 0 for this ring (me/pipe).
3331 	 */
3332 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
3333 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3334 		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
3335 	}
3336 
3337 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3338 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3339 	amdgpu_ring_write(ring,
3340 #ifdef __BIG_ENDIAN
3341 				(2 << 0) |
3342 #endif
3343 				lower_32_bits(ib->gpu_addr));
3344 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3345 	amdgpu_ring_write(ring, control);
3346 }
3347 
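/*
 * Fence emission via RELEASE_MEM: request an end-of-pipe cache flush (GLV and
 * GL2 writeback) and then write the sequence number to memory. DATA_SEL 1/2
 * selects a 32-bit vs 64-bit data write, INT_SEL 2 raises the interrupt once
 * the write is confirmed.
 */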
3348 static void gfx_v12_1_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
3349 				     u64 seq, unsigned flags)
3350 {
3351 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
3352 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
3353 
3354 	/* RELEASE_MEM - flush caches, send int */
3355 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
3356 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ(1) |
3357 				 PACKET3_RELEASE_MEM_GCR_GLV_WB |
3358 				 PACKET3_RELEASE_MEM_GCR_GL2_WB |
3359 				 PACKET3_RELEASE_MEM_GCR_GL2_SCOPE(2) |
3360 				 PACKET3_RELEASE_MEM_TEMPORAL(3) |
3361 				 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
3362 				 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
3363 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
3364 				 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));
3365 
3366 	/*
3367 	 * The address must be Qword aligned for a 64-bit write, or Dword
3368 	 * aligned when only the low 32 bits are sent (data high discarded).
3369 	 */
3370 	if (write64bit)
3371 		BUG_ON(addr & 0x7);
3372 	else
3373 		BUG_ON(addr & 0x3);
3374 	amdgpu_ring_write(ring, lower_32_bits(addr));
3375 	amdgpu_ring_write(ring, upper_32_bits(addr));
3376 	amdgpu_ring_write(ring, lower_32_bits(seq));
3377 	amdgpu_ring_write(ring, upper_32_bits(seq));
3378 	amdgpu_ring_write(ring, 0);
3379 }
3380 
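/*
 * Stall the ring until its most recently emitted fence has signalled, by
 * waiting on the fence address with WAIT_REG_MEM (on the PFP engine for gfx
 * rings, on the ME otherwise).
 */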
3381 static void gfx_v12_1_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
3382 {
3383 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3384 	uint32_t seq = ring->fence_drv.sync_seq;
3385 	uint64_t addr = ring->fence_drv.gpu_addr;
3386 
3387 	gfx_v12_1_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
3388 			       upper_32_bits(addr), seq, 0xffffffff, 4);
3389 }
3390 
3391 static void gfx_v12_1_ring_invalidate_tlbs(struct amdgpu_ring *ring,
3392 				   uint16_t pasid, uint32_t flush_type,
3393 				   bool all_hub, uint8_t dst_sel)
3394 {
3395 	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
3396 	amdgpu_ring_write(ring,
3397 			  PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) |
3398 			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
3399 			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
3400 			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
3401 }
3402 
3403 static void gfx_v12_1_ring_emit_vm_flush(struct amdgpu_ring *ring,
3404 					 unsigned vmid, uint64_t pd_addr)
3405 {
3406 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
3407 
3408 	/* compute doesn't have PFP */
3409 	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
3410 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
3411 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3412 		amdgpu_ring_write(ring, 0x0);
3413 	}
3414 }
3415 
3416 static void gfx_v12_1_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
3417 					  u64 seq, unsigned int flags)
3418 {
3419 	struct amdgpu_device *adev = ring->adev;
3420 
3421 	/* we only allocate 32bit for each seq wb address */
3422 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
3423 
3424 	/* write fence seq to the "addr" */
3425 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3426 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3427 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
3428 	amdgpu_ring_write(ring, lower_32_bits(addr));
3429 	amdgpu_ring_write(ring, upper_32_bits(addr));
3430 	amdgpu_ring_write(ring, lower_32_bits(seq));
3431 
3432 	if (flags & AMDGPU_FENCE_FLAG_INT) {
3433 		/* set register to trigger INT */
3434 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3435 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3436 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
3437 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS));
3438 		amdgpu_ring_write(ring, 0);
3439 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
3440 	}
3441 }
3442 
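/*
 * KIQ register read: COPY_DATA from the register into the writeback slot at
 * reg_val_offs, with write confirm so the waiter can poll the result from
 * the CPU side.
 */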
3443 static void gfx_v12_1_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
3444 				     uint32_t reg_val_offs)
3445 {
3446 	struct amdgpu_device *adev = ring->adev;
3447 
3448 	reg = soc_v1_0_normalize_xcc_reg_offset(reg);
3449 
3450 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
3451 	amdgpu_ring_write(ring, 0 |	/* src: register*/
3452 				(5 << 8) |	/* dst: memory */
3453 				(1 << 20));	/* write confirm */
3454 	amdgpu_ring_write(ring, reg);
3455 	amdgpu_ring_write(ring, 0);
3456 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
3457 				reg_val_offs * 4));
3458 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
3459 				reg_val_offs * 4));
3460 }
3461 
3462 static void gfx_v12_1_ring_emit_wreg(struct amdgpu_ring *ring,
3463 				     uint32_t reg,
3464 				     uint32_t val)
3465 {
3466 	uint32_t cmd = 0;
3467 
3468 	reg = soc_v1_0_normalize_xcc_reg_offset(reg);
3469 
3470 	switch (ring->funcs->type) {
3471 	case AMDGPU_RING_TYPE_KIQ:
3472 		cmd = (1 << 16); /* no inc addr */
3473 		break;
3474 	default:
3475 		cmd = WR_CONFIRM;
3476 		break;
3477 	}
3478 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3479 	amdgpu_ring_write(ring, cmd);
3480 	amdgpu_ring_write(ring, reg);
3481 	amdgpu_ring_write(ring, 0);
3482 	amdgpu_ring_write(ring, val);
3483 }
3484 
3485 static void gfx_v12_1_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
3486 					uint32_t val, uint32_t mask)
3487 {
3488 	gfx_v12_1_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
3489 }
3490 
3491 static void gfx_v12_1_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
3492 						   uint32_t reg0, uint32_t reg1,
3493 						   uint32_t ref, uint32_t mask)
3494 {
3495 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3496 
3497 	gfx_v12_1_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
3498 			       ref, mask, 0x20);
3499 }
3500 
3501 static void gfx_v12_1_xcc_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
3502 							int me, int pipe,
3503 							enum amdgpu_interrupt_state state,
3504 							int xcc_id)
3505 {
3506 	u32 mec_int_cntl, mec_int_cntl_reg;
3507 
3508 	/*
3509 	 * amdgpu controls only the first MEC. That's why this function only
3510 	 * handles the setting of interrupts for this specific MEC. All other
3511 	 * pipes' interrupts are set by amdkfd.
3512 	 */
3513 
3514 	if (me == 1) {
3515 		switch (pipe) {
3516 		case 0:
3517 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3518 					GC, GET_INST(GC, xcc_id),
3519 					regCP_ME1_PIPE0_INT_CNTL);
3520 			break;
3521 		case 1:
3522 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3523 					GC, GET_INST(GC, xcc_id),
3524 					regCP_ME1_PIPE1_INT_CNTL);
3525 			break;
3526 		case 2:
3527 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3528 					GC, GET_INST(GC, xcc_id),
3529 					regCP_ME1_PIPE2_INT_CNTL);
3530 			break;
3531 		case 3:
3532 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3533 					GC, GET_INST(GC, xcc_id),
3534 					regCP_ME1_PIPE3_INT_CNTL);
3535 			break;
3536 		default:
3537 			DRM_DEBUG("invalid pipe %d\n", pipe);
3538 			return;
3539 		}
3540 	} else {
3541 		DRM_DEBUG("invalid me %d\n", me);
3542 		return;
3543 	}
3544 
3545 	switch (state) {
3546 	case AMDGPU_IRQ_STATE_DISABLE:
3547 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3548 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3549 					     TIME_STAMP_INT_ENABLE, 0);
3550 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3551 					     GENERIC0_INT_ENABLE, 0);
3552 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3553 		break;
3554 	case AMDGPU_IRQ_STATE_ENABLE:
3555 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3556 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3557 					     TIME_STAMP_INT_ENABLE, 1);
3558 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3559 					     GENERIC0_INT_ENABLE, 1);
3560 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3561 		break;
3562 	default:
3563 		break;
3564 	}
3565 }
3566 
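/* Fan the per-pipe EOP interrupt enable out to every XCC instance. */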
3567 static int gfx_v12_1_set_eop_interrupt_state(struct amdgpu_device *adev,
3568 					    struct amdgpu_irq_src *src,
3569 					    unsigned type,
3570 					    enum amdgpu_interrupt_state state)
3571 {
3572 	int i, num_xcc;
3573 
3574 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3575 	for (i = 0; i < num_xcc; i++) {
3576 		switch (type) {
3577 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
3578 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3579 					adev, 1, 0, state, i);
3580 			break;
3581 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
3582 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3583 					adev, 1, 1, state, i);
3584 			break;
3585 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
3586 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3587 					adev, 1, 2, state, i);
3588 			break;
3589 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
3590 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3591 					adev, 1, 3, state, i);
3592 			break;
3593 		default:
3594 			break;
3595 		}
3596 	}
3597 
3598 	return 0;
3599 }
3600 
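/*
 * CP EOP handler: fences of MES-managed queues are looked up via the queue
 * id carried in src_data, legacy rings are matched against the me/pipe/queue
 * fields decoded from ring_id.
 */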
3601 static int gfx_v12_1_eop_irq(struct amdgpu_device *adev,
3602 			     struct amdgpu_irq_src *source,
3603 			     struct amdgpu_iv_entry *entry)
3604 {
3605 	int i, xcc_id;
3606 	u8 me_id, pipe_id, queue_id;
3607 	struct amdgpu_ring *ring;
3608 	uint32_t mes_queue_id = entry->src_data[0];
3609 
3610 	DRM_DEBUG("IH: CP EOP\n");
3611 
3612 	if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
3613 		struct amdgpu_mes_queue *queue;
3614 
3615 		mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
3616 
3617 		spin_lock(&adev->mes.queue_id_lock);
3618 		queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
3619 		if (queue) {
3620 			DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
3621 			amdgpu_fence_process(queue->ring);
3622 		}
3623 		spin_unlock(&adev->mes.queue_id_lock);
3624 	} else {
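		/* ring_id layout: pipe in bits [1:0], me in [3:2], queue in [6:4] */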
3625 		me_id = (entry->ring_id & 0x0c) >> 2;
3626 		pipe_id = (entry->ring_id & 0x03) >> 0;
3627 		queue_id = (entry->ring_id & 0x70) >> 4;
3628 		xcc_id = gfx_v12_1_ih_to_xcc_inst(adev, entry->node_id);
3629 
3630 		if (xcc_id == -EINVAL)
3631 			return -EINVAL;
3632 
3633 		switch (me_id) {
3634 		case 0:
3635 			if (pipe_id == 0)
3636 				amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
3637 			else
3638 				amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
3639 			break;
3640 		case 1:
3641 		case 2:
3642 			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3643 				ring = &adev->gfx.compute_ring
3644 						[i +
3645 						 xcc_id * adev->gfx.num_compute_rings];
3646 				/* Per-queue interrupt is supported for MEC starting from VI.
3647 				 * The interrupt can only be enabled/disabled per pipe instead
3648 				 * of per queue.
3649 				 */
3650 				if ((ring->me == me_id) &&
3651 				    (ring->pipe == pipe_id) &&
3652 				    (ring->queue == queue_id))
3653 					amdgpu_fence_process(ring);
3654 			}
3655 			break;
3656 		}
3657 	}
3658 
3659 	return 0;
3660 }
3661 
3662 static int gfx_v12_1_set_priv_reg_fault_state(struct amdgpu_device *adev,
3663 					      struct amdgpu_irq_src *source,
3664 					      unsigned type,
3665 					      enum amdgpu_interrupt_state state)
3666 {
3667 	int i, num_xcc;
3668 
3669 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3670 	switch (state) {
3671 	case AMDGPU_IRQ_STATE_DISABLE:
3672 	case AMDGPU_IRQ_STATE_ENABLE:
3673 		for (i = 0; i < num_xcc; i++)
3674 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3675 					      PRIV_REG_INT_ENABLE,
3676 					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3677 		break;
3678 	default:
3679 		break;
3680 	}
3681 
3682 	return 0;
3683 }
3684 
3685 static int gfx_v12_1_set_priv_inst_fault_state(struct amdgpu_device *adev,
3686 					       struct amdgpu_irq_src *source,
3687 					       unsigned type,
3688 					       enum amdgpu_interrupt_state state)
3689 {
3690 	int i, num_xcc;
3691 
3692 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3693 	switch (state) {
3694 	case AMDGPU_IRQ_STATE_DISABLE:
3695 	case AMDGPU_IRQ_STATE_ENABLE:
3696 		for (i = 0; i < num_xcc; i++)
3697 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3698 				       PRIV_INSTR_INT_ENABLE,
3699 				       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3700 		break;
3701 	default:
3702 		break;
3703 	}
3704 
3705 	return 0;
3706 }
3707 
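/*
 * Report a scheduler fault on whichever ring matches the faulting
 * me/pipe/queue decoded from the IV entry.
 */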
3708 static void gfx_v12_1_handle_priv_fault(struct amdgpu_device *adev,
3709 					struct amdgpu_iv_entry *entry)
3710 {
3711 	u8 me_id, pipe_id, queue_id;
3712 	struct amdgpu_ring *ring;
3713 	int i, xcc_id;
3714 
3715 	me_id = (entry->ring_id & 0x0c) >> 2;
3716 	pipe_id = (entry->ring_id & 0x03) >> 0;
3717 	queue_id = (entry->ring_id & 0x70) >> 4;
3718 	xcc_id = gfx_v12_1_ih_to_xcc_inst(adev, entry->node_id);
3719 
3720 	if (xcc_id == -EINVAL)
3721 		return;
3722 
3723 	switch (me_id) {
3724 	case 0:
3725 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3726 			ring = &adev->gfx.gfx_ring[i];
3727 			/* we only enabled 1 gfx queue per pipe for now */
3728 			if (ring->me == me_id && ring->pipe == pipe_id)
3729 				drm_sched_fault(&ring->sched);
3730 		}
3731 		break;
3732 	case 1:
3733 	case 2:
3734 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3735 			ring = &adev->gfx.compute_ring
3736 					[i +
3737 					 xcc_id * adev->gfx.num_compute_rings];
3738 			if (ring->me == me_id && ring->pipe == pipe_id &&
3739 			    ring->queue == queue_id)
3740 				drm_sched_fault(&ring->sched);
3741 		}
3742 		break;
3743 	default:
3744 		BUG();
3745 		break;
3746 	}
3747 }
3748 
3749 static int gfx_v12_1_priv_reg_irq(struct amdgpu_device *adev,
3750 				  struct amdgpu_irq_src *source,
3751 				  struct amdgpu_iv_entry *entry)
3752 {
3753 	DRM_ERROR("Illegal register access in command stream\n");
3754 	gfx_v12_1_handle_priv_fault(adev, entry);
3755 	return 0;
3756 }
3757 
3758 static int gfx_v12_1_priv_inst_irq(struct amdgpu_device *adev,
3759 				   struct amdgpu_irq_src *source,
3760 				   struct amdgpu_iv_entry *entry)
3761 {
3762 	DRM_ERROR("Illegal instruction in command stream\n");
3763 	gfx_v12_1_handle_priv_fault(adev, entry);
3764 	return 0;
3765 }
3766 
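/*
 * Full memory sync: ACQUIRE_MEM over the entire address range with a GCR
 * request that invalidates the GLI (I$), GLK (K$), GLV and GL2 caches and
 * writes GL2 back.
 */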
3767 static void gfx_v12_1_emit_mem_sync(struct amdgpu_ring *ring)
3768 {
3769 	const unsigned int gcr_cntl =
3770 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
3771 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
3772 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
3773 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
3774 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1) |
3775 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_SCOPE(2);
3776 
3777 	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
3778 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
3779 	amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
3780 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
3781 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
3782 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
3783 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
3784 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
3785 	amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
3786 }
3787 
3788 static const struct amd_ip_funcs gfx_v12_1_ip_funcs = {
3789 	.name = "gfx_v12_1",
3790 	.early_init = gfx_v12_1_early_init,
3791 	.late_init = gfx_v12_1_late_init,
3792 	.sw_init = gfx_v12_1_sw_init,
3793 	.sw_fini = gfx_v12_1_sw_fini,
3794 	.hw_init = gfx_v12_1_hw_init,
3795 	.hw_fini = gfx_v12_1_hw_fini,
3796 	.suspend = gfx_v12_1_suspend,
3797 	.resume = gfx_v12_1_resume,
3798 	.is_idle = gfx_v12_1_is_idle,
3799 	.wait_for_idle = gfx_v12_1_wait_for_idle,
3800 	.set_clockgating_state = gfx_v12_1_set_clockgating_state,
3801 	.set_powergating_state = gfx_v12_1_set_powergating_state,
3802 	.get_clockgating_state = gfx_v12_1_get_clockgating_state,
3803 };
3804 
3805 static const struct amdgpu_ring_funcs gfx_v12_1_ring_funcs_compute = {
3806 	.type = AMDGPU_RING_TYPE_COMPUTE,
3807 	.align_mask = 0xff,
3808 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
3809 	.support_64bit_ptrs = true,
3810 	.get_rptr = gfx_v12_1_ring_get_rptr_compute,
3811 	.get_wptr = gfx_v12_1_ring_get_wptr_compute,
3812 	.set_wptr = gfx_v12_1_ring_set_wptr_compute,
3813 	.emit_frame_size =
3814 		7 + /* gfx_v12_1_ring_emit_pipeline_sync */
3815 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
3816 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
3817 		2 + /* gfx_v12_1_ring_emit_vm_flush */
3818 		8 + 8 + 8 + /* gfx_v12_1_ring_emit_fence x3 for user fence, vm fence */
3819 		8, /* gfx_v12_1_emit_mem_sync */
3820 	.emit_ib_size =	7, /* gfx_v12_1_ring_emit_ib_compute */
3821 	.emit_ib = gfx_v12_1_ring_emit_ib_compute,
3822 	.emit_fence = gfx_v12_1_ring_emit_fence,
3823 	.emit_pipeline_sync = gfx_v12_1_ring_emit_pipeline_sync,
3824 	.emit_vm_flush = gfx_v12_1_ring_emit_vm_flush,
3825 	.test_ring = gfx_v12_1_ring_test_ring,
3826 	.test_ib = gfx_v12_1_ring_test_ib,
3827 	.insert_nop = amdgpu_ring_insert_nop,
3828 	.pad_ib = amdgpu_ring_generic_pad_ib,
3829 	.emit_wreg = gfx_v12_1_ring_emit_wreg,
3830 	.emit_reg_wait = gfx_v12_1_ring_emit_reg_wait,
3831 	.emit_reg_write_reg_wait = gfx_v12_1_ring_emit_reg_write_reg_wait,
3832 	.emit_mem_sync = gfx_v12_1_emit_mem_sync,
3833 };
3834 
3835 static const struct amdgpu_ring_funcs gfx_v12_1_ring_funcs_kiq = {
3836 	.type = AMDGPU_RING_TYPE_KIQ,
3837 	.align_mask = 0xff,
3838 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
3839 	.support_64bit_ptrs = true,
3840 	.get_rptr = gfx_v12_1_ring_get_rptr_compute,
3841 	.get_wptr = gfx_v12_1_ring_get_wptr_compute,
3842 	.set_wptr = gfx_v12_1_ring_set_wptr_compute,
3843 	.emit_frame_size =
3844 		7 + /* gfx_v12_1_ring_emit_pipeline_sync */
3845 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
3846 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
3847 		2 + /* gfx_v12_1_ring_emit_vm_flush */
3848 		8 + 8 + 8, /* gfx_v12_1_ring_emit_fence_kiq x3 for user fence, vm fence */
3849 	.emit_ib_size =	7, /* gfx_v12_1_ring_emit_ib_compute */
3850 	.emit_ib = gfx_v12_1_ring_emit_ib_compute,
3851 	.emit_fence = gfx_v12_1_ring_emit_fence_kiq,
3852 	.test_ring = gfx_v12_1_ring_test_ring,
3853 	.test_ib = gfx_v12_1_ring_test_ib,
3854 	.insert_nop = amdgpu_ring_insert_nop,
3855 	.pad_ib = amdgpu_ring_generic_pad_ib,
3856 	.emit_rreg = gfx_v12_1_ring_emit_rreg,
3857 	.emit_wreg = gfx_v12_1_ring_emit_wreg,
3858 	.emit_reg_wait = gfx_v12_1_ring_emit_reg_wait,
3859 	.emit_reg_write_reg_wait = gfx_v12_1_ring_emit_reg_write_reg_wait,
3860 };
3861 
3862 static void gfx_v12_1_set_ring_funcs(struct amdgpu_device *adev)
3863 {
3864 	int i, j, num_xcc;
3865 
3866 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3867 	for (i = 0; i < num_xcc; i++) {
3868 		adev->gfx.kiq[i].ring.funcs = &gfx_v12_1_ring_funcs_kiq;
3869 
3870 		for (j = 0; j < adev->gfx.num_compute_rings; j++)
3871 			adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs =
3872 						&gfx_v12_1_ring_funcs_compute;
3873 	}
3874 }
3875 
3876 static const struct amdgpu_irq_src_funcs gfx_v12_1_eop_irq_funcs = {
3877 	.set = gfx_v12_1_set_eop_interrupt_state,
3878 	.process = gfx_v12_1_eop_irq,
3879 };
3880 
3881 static const struct amdgpu_irq_src_funcs gfx_v12_1_priv_reg_irq_funcs = {
3882 	.set = gfx_v12_1_set_priv_reg_fault_state,
3883 	.process = gfx_v12_1_priv_reg_irq,
3884 };
3885 
3886 static const struct amdgpu_irq_src_funcs gfx_v12_1_priv_inst_irq_funcs = {
3887 	.set = gfx_v12_1_set_priv_inst_fault_state,
3888 	.process = gfx_v12_1_priv_inst_irq,
3889 };
3890 
3891 static void gfx_v12_1_set_irq_funcs(struct amdgpu_device *adev)
3892 {
3893 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
3894 	adev->gfx.eop_irq.funcs = &gfx_v12_1_eop_irq_funcs;
3895 
3896 	adev->gfx.priv_reg_irq.num_types = 1;
3897 	adev->gfx.priv_reg_irq.funcs = &gfx_v12_1_priv_reg_irq_funcs;
3898 
3899 	adev->gfx.priv_inst_irq.num_types = 1;
3900 	adev->gfx.priv_inst_irq.funcs = &gfx_v12_1_priv_inst_irq_funcs;
3901 }
3902 
3903 static void gfx_v12_1_set_imu_funcs(struct amdgpu_device *adev)
3904 {
3905 	if (adev->flags & AMD_IS_APU)
3906 		adev->gfx.imu.mode = MISSION_MODE;
3907 	else
3908 		adev->gfx.imu.mode = DEBUG_MODE;

3909 	if (!amdgpu_sriov_vf(adev))
3910 		adev->gfx.imu.funcs = &gfx_v12_1_imu_funcs;
3911 }
3912 
3913 static void gfx_v12_1_set_rlc_funcs(struct amdgpu_device *adev)
3914 {
3915 	adev->gfx.rlc.funcs = &gfx_v12_1_rlc_funcs;
3916 }
3917 
3918 static void gfx_v12_1_set_mqd_funcs(struct amdgpu_device *adev)
3919 {
3920 	/* set compute eng mqd */
3921 	adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
3922 		sizeof(struct v12_1_compute_mqd);
3923 	adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
3924 		gfx_v12_1_compute_mqd_init;
3925 }
3926 
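/*
 * Apply the user-requested CU mask for the currently selected SE/SA by
 * marking the corresponding WGPs inactive in GC_USER_SHADER_ARRAY_CONFIG.
 */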
3927 static void gfx_v12_1_set_user_cu_inactive_bitmap_per_sh(struct amdgpu_device *adev,
3928 							  u32 bitmap, int xcc_id)
3929 {
3930 	u32 data;
3931 
3932 	if (!bitmap)
3933 		return;
3934 
3935 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
3936 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
3937 
3938 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
3939 }
3940 
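/*
 * Active CU bitmap for the selected SE/SA: combine the fused-off (CC_*) and
 * user-disabled (GC_USER_*) inactive WGP bits, then invert and clip the
 * result to max_cu_per_sh.
 */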
3941 static u32 gfx_v12_1_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev,
3942 						 int xcc_id)
3943 {
3944 	u32 data, mask;
3945 
3946 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
3947 	data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);
3948 
3949 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
3950 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
3951 
3952 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
3953 
3954 	return (~data) & mask;
3955 }
3956 
3957 static int gfx_v12_1_get_cu_info(struct amdgpu_device *adev,
3958 				 struct amdgpu_cu_info *cu_info)
3959 {
3960 	int i, j, k, counter, xcc_id, active_cu_number = 0;
3961 	u32 mask, bitmap;
3962 	unsigned int disable_masks[2 * 2];
3963 
3964 	if (!adev || !cu_info)
3965 		return -EINVAL;
3966 
3967 	if (adev->gfx.config.max_shader_engines > 2 ||
3968 	    adev->gfx.config.max_sh_per_se > 2) {
3969 		dev_err(adev->dev,
3970 			"Max SE (%d) or max SA per SE (%d) is greater than expected\n",
3971 			adev->gfx.config.max_shader_engines,
3972 			adev->gfx.config.max_sh_per_se);
3973 		return -EINVAL;
3974 	}
3975 
3976 	amdgpu_gfx_parse_disable_cu(adev, disable_masks,
3977 				    adev->gfx.config.max_shader_engines,
3978 				    adev->gfx.config.max_sh_per_se);
3979 
3980 	mutex_lock(&adev->grbm_idx_mutex);
3981 	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
3982 		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3983 			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3984 				bitmap = i * adev->gfx.config.max_sh_per_se + j;
3985 				if (!((gfx_v12_1_get_sa_active_bitmap(adev, xcc_id) >> bitmap) & 1))
3986 					continue;
3987 				mask = 1;
3988 				counter = 0;
3989 				gfx_v12_1_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
3990 				gfx_v12_1_set_user_cu_inactive_bitmap_per_sh(
3991 					adev,
3992 					disable_masks[i * adev->gfx.config.max_sh_per_se + j],
3993 					xcc_id);
3994 				bitmap = gfx_v12_1_get_cu_active_bitmap_per_sh(adev, xcc_id);
3995 
3996 				cu_info->bitmap[xcc_id][i][j] = bitmap;
3997 
3998 				for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
3999 					if (bitmap & mask)
4000 						counter++;
4001 
4002 					mask <<= 1;
4003 				}
4004 				active_cu_number += counter;
4005 			}
4006 		}
4007 		gfx_v12_1_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, xcc_id);
4008 	}
4009 	mutex_unlock(&adev->grbm_idx_mutex);
4010 
4011 	cu_info->number = active_cu_number;
4012 	cu_info->simd_per_cu = NUM_SIMD_PER_CU_GFX12_1;
4013 	cu_info->lds_size = 320;
4014 
4015 	return 0;
4016 }
4017 
4018 const struct amdgpu_ip_block_version gfx_v12_1_ip_block = {
4019 	.type = AMD_IP_BLOCK_TYPE_GFX,
4020 	.major = 12,
4021 	.minor = 1,
4022 	.rev = 0,
4023 	.funcs = &gfx_v12_1_ip_funcs,
4024 };
4025 
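/*
 * XCP (partition) hooks: resume reinitializes constants, RLC and CP only on
 * the XCC instances selected by inst_mask; suspend tears the same instances
 * down again.
 */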
4026 static int gfx_v12_1_xcp_resume(void *handle, uint32_t inst_mask)
4027 {
4028 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4029 	uint32_t tmp_mask;
4030 	int i, r;
4031 
4032 	/* TODO : Initialize golden regs */
4033 	/* gfx_v12_1_init_golden_registers(adev); */
4034 
4035 	tmp_mask = inst_mask;
4036 	for_each_inst(i, tmp_mask)
4037 		gfx_v12_1_xcc_constants_init(adev, i);
4038 
4039 	if (!amdgpu_sriov_vf(adev)) {
4040 		tmp_mask = inst_mask;
4041 		for_each_inst(i, tmp_mask) {
4042 			r = gfx_v12_1_xcc_rlc_resume(adev, i);
4043 			if (r)
4044 				return r;
4045 		}
4046 	}
4047 
4048 	r = gfx_v12_1_xcc_cp_resume(adev, inst_mask);
4049 
4050 	return r;
4051 }
4052 
4053 static int gfx_v12_1_xcp_suspend(void *handle, uint32_t inst_mask)
4054 {
4055 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4056 	int i;
4057 
4058 	for_each_inst(i, inst_mask)
4059 		gfx_v12_1_xcc_fini(adev, i);
4060 
4061 	return 0;
4062 }
4063 
4064 struct amdgpu_xcp_ip_funcs gfx_v12_1_xcp_funcs = {
4065 	.suspend = &gfx_v12_1_xcp_suspend,
4066 	.resume = &gfx_v12_1_xcp_resume
4067 };
4068