/*
 * Copyright 2025 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "amdgpu_atomfirmware.h"
#include "imu_v12_1.h"
#include "soc_v1_0.h"
#include "gfx_v12_1_pkt.h"

#include "gc/gc_12_1_0_offset.h"
#include "gc/gc_12_1_0_sh_mask.h"
#include "soc24_enum.h"
#include "ivsrcid/gfx/irqsrcs_gfx_12_1_0.h"

#include "soc15.h"
#include "clearstate_gfx12.h"
#include "v12_structs.h"
#include "gfx_v12_1.h"
#include "mes_v12_1.h"

#define GFX12_MEC_HPD_SIZE	2048
#define NUM_SIMD_PER_CU_GFX12_1	4

#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L

MODULE_FIRMWARE("amdgpu/gc_12_1_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_12_1_0_rlc.bin");

#define SH_MEM_ALIGNMENT_MODE_UNALIGNED_GFX12_1_0	0x00000001
#define DEFAULT_SH_MEM_CONFIG \
	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED_GFX12_1_0 << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))

#define XCC_REG_RANGE_0_LOW  0x1260     /* XCC gfxdec0 lower bound */
#define XCC_REG_RANGE_0_HIGH 0x3C00     /* XCC gfxdec0 upper bound */
#define XCC_REG_RANGE_1_LOW  0xA000     /* XCC gfxdec1 lower bound */
#define XCC_REG_RANGE_1_HIGH 0x10000    /* XCC gfxdec1 upper bound */
#define NORMALIZE_XCC_REG_OFFSET(offset) \
	(offset & 0xFFFF)
static void gfx_v12_1_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v12_1_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v12_1_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v12_1_set_rlc_funcs(struct amdgpu_device *adev);
static void gfx_v12_1_set_mqd_funcs(struct amdgpu_device *adev);
static void gfx_v12_1_set_imu_funcs(struct amdgpu_device *adev);
static int gfx_v12_1_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v12_1_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v12_1_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				       u32 sh_num, u32 instance, int xcc_id);
static void gfx_v12_1_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val);
static int gfx_v12_1_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v12_1_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel);
static void gfx_v12_1_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v12_1_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v12_1_update_perf_clk(struct amdgpu_device *adev,
				      bool enable);
static void gfx_v12_1_xcc_update_perf_clk(struct amdgpu_device *adev,
					  bool enable, int xcc_id);

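/* KIQ PM4 helpers: the KIQ (kernel interface queue) is a privileged
 * compute queue the driver uses to map, unmap and query other queues.
 */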
static void gfx_v12_1_kiq_set_resources(struct amdgpu_ring *kiq_ring,
					uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);
}

static void gfx_v12_1_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				     struct amdgpu_ring *ring)
{
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = ring->wptr_gpu_addr;
	uint32_t me = 0, eng_sel = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_COMPUTE:
		me = 1;
		eng_sel = 0;
		break;
	case AMDGPU_RING_TYPE_MES:
		me = 2;
		eng_sel = 5;
		break;
	default:
		WARN_ON(1);
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME(me) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx_v12_1_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				       struct amdgpu_ring *ring,
				       enum amdgpu_unmap_queues_action action,
				       u64 gpu_addr, u64 seq)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr,
					      seq, kiq_ring->xcc_id);
		return;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
		  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx_v12_1_kiq_query_status(struct amdgpu_ring *kiq_ring,
				       struct amdgpu_ring *ring,
				       u64 addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v12_1_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
					  uint16_t pasid,
					  uint32_t flush_type,
					  bool all_hub)
{
	gfx_v12_1_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
}

static const struct kiq_pm4_funcs gfx_v12_1_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v12_1_kiq_set_resources,
	.kiq_map_queues = gfx_v12_1_kiq_map_queues,
	.kiq_unmap_queues = gfx_v12_1_kiq_unmap_queues,
	.kiq_query_status = gfx_v12_1_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v12_1_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v12_1_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		adev->gfx.kiq[i].pmf = &gfx_v12_1_kiq_pm4_funcs;
}

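/* Registers in the two XCC gfxdec ranges are instanced per XCC; keeping
 * only the lower 16 bits makes a packet address the local XCC's copy.
 */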
static uint32_t gfx_v12_1_normalize_xcc_reg_offset(uint32_t reg)
{
	uint32_t normalized_reg = NORMALIZE_XCC_REG_OFFSET(reg);

	/* If it is an XCC reg, normalize the reg to keep
	 * the lower 16 bits in the local XCC
	 */
	if (((normalized_reg >= XCC_REG_RANGE_0_LOW) && (normalized_reg < XCC_REG_RANGE_0_HIGH)) ||
	    ((normalized_reg >= XCC_REG_RANGE_1_LOW) && (normalized_reg < XCC_REG_RANGE_1_HIGH)))
		return normalized_reg;
	else
		return reg;
}
243 
244 static void gfx_v12_1_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
245 				   int mem_space, int opt, uint32_t addr0,
246 				   uint32_t addr1, uint32_t ref,
247 				   uint32_t mask, uint32_t inv)
248 {
249 	if (mem_space == 0) {
250 		addr0 = gfx_v12_1_normalize_xcc_reg_offset(addr0);
251 		addr1 = gfx_v12_1_normalize_xcc_reg_offset(addr1);
252 	}
253 
254 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
255 	amdgpu_ring_write(ring,
256 			  /* memory (1) or register (0) */
257 			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
258 			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
259 			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
260 			   WAIT_REG_MEM_ENGINE(eng_sel)));
261 
262 	if (mem_space)
263 		BUG_ON(addr0 & 0x3); /* Dword align */
264 	amdgpu_ring_write(ring, addr0);
265 	amdgpu_ring_write(ring, addr1);
266 	amdgpu_ring_write(ring, ref);
267 	amdgpu_ring_write(ring, mask);
268 	amdgpu_ring_write(ring, inv); /* poll interval */
269 }
270 
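/* Sanity-check the ring by writing a magic value to SCRATCH_REG0 through
 * a packet and polling the register until the CP has executed it.
 */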
static int gfx_v12_1_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch_reg0_offset, xcc_offset;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* Use register offset which is local to XCC in the packet */
	xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
	WREG32(scratch_reg0_offset, 0xCAFEDEAD);
	tmp = RREG32(scratch_reg0_offset);

	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		dev_err(adev->dev,
			"amdgpu: cp failed to lock ring %d (%d).\n",
			ring->idx, r);
		return r;
	}

	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
		gfx_v12_1_ring_emit_wreg(ring, xcc_offset, 0xDEADBEEF);
	} else {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
		amdgpu_ring_write(ring, xcc_offset -
				  PACKET3_SET_UCONFIG_REG_START);
		amdgpu_ring_write(ring, 0xDEADBEEF);
	}
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch_reg0_offset);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}

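/* Sanity-check IB execution: submit an IB that writes a magic value to a
 * writeback slot, then wait for its fence and verify the value landed.
 */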
static int gfx_v12_1_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	uint64_t gpu_addr;
	volatile uint32_t *cpu_ptr;
	long r;

	/* MES KIQ fw doesn't support indirect buffers for now */
	if (adev->enable_mes_kiq &&
	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		return 0;

	memset(&ib, 0, sizeof(ib));

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	cpu_ptr = &adev->wb.wb[index];

	r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
	if (r) {
		dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;
err2:
	amdgpu_ib_free(&ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}

static void gfx_v12_1_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v12_1_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
{
	const struct psp_firmware_header_v1_0 *toc_hdr;
	int err = 0;

	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_toc.bin", ucode_prefix);
	if (err)
		goto out;

	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
			le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
	return 0;
out:
	amdgpu_ucode_release(&adev->psp.toc_fw);
	return err;
}

static int gfx_v12_1_init_microcode(struct amdgpu_device *adev)
{
	char ucode_prefix[15];
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

	if (!amdgpu_sriov_vf(adev)) {
		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
					   AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s_rlc.bin", ucode_prefix);
		if (err)
			goto out;
		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
		err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
		if (err)
			goto out;
	}

	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_mec.bin", ucode_prefix);
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		err = gfx_v12_1_init_toc_microcode(adev, ucode_prefix);

	/* only one MEC for gfx 12 */
	adev->gfx.mec2_fw = NULL;

	if (adev->gfx.imu.funcs) {
		if (adev->gfx.imu.funcs->init_microcode) {
			err = adev->gfx.imu.funcs->init_microcode(adev);
			if (err)
				dev_err(adev->dev, "Failed to load imu firmware!\n");
		}
	}

out:
	if (err) {
		amdgpu_ucode_release(&adev->gfx.rlc_fw);
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	}

	return err;
}

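/* The clear state buffer holds one leading count dword plus, per extent,
 * a two dword (count, index) header followed by the register values.
 */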
static u32 gfx_v12_1_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	count += 1;

	for (sect = gfx12_cs_data; sect->section != NULL; ++sect) {
		if (sect->id == SECT_CONTEXT) {
			for (ext = sect->section; ext->extent != NULL; ++ext)
				count += 2 + ext->reg_count;
		} else
			return 0;
	}

	return count;
}

static void gfx_v12_1_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
	u32 count = 0, clustercount = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	count += 1;

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		if (sect->id == SECT_CONTEXT) {
			for (ext = sect->section; ext->extent != NULL; ++ext) {
				clustercount++;
				buffer[count++] = ext->reg_count;
				buffer[count++] = ext->reg_index;

				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			}
		} else
			return;
	}

	buffer[0] = clustercount;
}

static void gfx_v12_1_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			&adev->gfx.rlc.clear_state_gpu_addr,
			(void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			&adev->gfx.rlc.cp_table_gpu_addr,
			(void **)&adev->gfx.rlc.cp_table_ptr);
}

static void gfx_v12_1_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	int xcc_id, num_xcc;
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
		reg_access_ctrl->scratch_reg0 =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0);
		reg_access_ctrl->scratch_reg1 =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1);
		reg_access_ctrl->scratch_reg2 =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2);
		reg_access_ctrl->scratch_reg3 =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3);
		reg_access_ctrl->grbm_cntl =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
		reg_access_ctrl->grbm_idx =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
		reg_access_ctrl->spare_int =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT_0);
	}
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}

static int gfx_v12_1_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r, i, num_xcc;

	adev->gfx.rlc.cs_data = gfx12_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	/* init spm vmid with 0xf */
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		if (adev->gfx.rlc.funcs->update_spm_vmid)
			adev->gfx.rlc.funcs->update_spm_vmid(adev, i, NULL, 0xf);
	}

	return 0;
}

static void gfx_v12_1_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
}

static int gfx_v12_1_mec_init(struct amdgpu_device *adev)
{
	int r, i, num_xcc;
	u32 *hpd;
	size_t mec_hpd_size;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
			    AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings *
		       GFX12_MEC_HPD_SIZE * num_xcc;

	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v12_1_mec_fini(adev);
			return r;
		}

		memset(hpd, 0, mec_hpd_size);

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	return 0;
}

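/* Wave state is read through the SQ indirect interface: program the
 * wave/register selection into SQ_IND_INDEX, then read SQ_IND_DATA.
 */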
static uint32_t wave_read_ind(struct amdgpu_device *adev,
			      uint32_t xcc_id, uint32_t wave,
			      uint32_t address)
{
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev,
			   uint32_t xcc_id, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void gfx_v12_1_read_wave_data(struct amdgpu_device *adev,
				     uint32_t xcc_id,
				     uint32_t simd, uint32_t wave,
				     uint32_t *dst, int *no_fields)
{
	/* in gfx12 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh so it should be
	 * zero here
	 */
	WARN_ON(simd != 0);

	/* type 4 wave data */
	dst[(*no_fields)++] = 4;
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_MODE);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_STATE_PRIV);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXCP_FLAG_PRIV);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXCP_FLAG_USER);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_TRAP_CTRL);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_ACTIVE);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_VALID_AND_IDLE);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_DVGPR_ALLOC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_DVGPR_ALLOC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_SCHED_MODE);
}

static void gfx_v12_1_read_wave_sgprs(struct amdgpu_device *adev,
				      uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t start,
				      uint32_t size, uint32_t *dst)
{
	WARN_ON(simd != 0);

	wave_read_regs(adev, xcc_id, wave, 0,
		       start + SQIND_WAVE_SGPRS_OFFSET,
		       size, dst);
}

static void gfx_v12_1_read_wave_vgprs(struct amdgpu_device *adev,
				      uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, wave, thread,
		       start + SQIND_WAVE_VGPRS_OFFSET,
		       size, dst);
}

static void gfx_v12_1_select_me_pipe_q(struct amdgpu_device *adev,
				       u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	soc_v1_0_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
}

static int gfx_v12_1_get_xccs_per_xcp(struct amdgpu_device *adev)
{
	/* Fill this in when the interface is ready */
	return 1;
}

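/* Translate an IH node id to a logical XCC: decode the physical XCC
 * instance from the node id, then look it up in the logical mapping.
 */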
static int gfx_v12_1_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
{
	int logic_xcc;
	int xcc = (ih_node & 0x7) - 2 + (ih_node >> 3) * 4;

	for (logic_xcc = 0; logic_xcc < NUM_XCC(adev->gfx.xcc_mask); logic_xcc++) {
		if (xcc == GET_INST(GC, logic_xcc))
			return logic_xcc;
	}

	dev_err(adev->dev, "Couldn't find xcc mapping from IH node\n");
	return -EINVAL;
}

static const struct amdgpu_gfx_funcs gfx_v12_1_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v12_1_get_gpu_clock_counter,
	.select_se_sh = &gfx_v12_1_xcc_select_se_sh,
	.read_wave_data = &gfx_v12_1_read_wave_data,
	.read_wave_sgprs = &gfx_v12_1_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v12_1_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v12_1_select_me_pipe_q,
	.update_perfmon_mgcg = &gfx_v12_1_update_perf_clk,
	.get_xccs_per_xcp = &gfx_v12_1_get_xccs_per_xcp,
	.ih_node_to_logical_xcc = &gfx_v12_1_ih_to_xcc_inst,
};

static int gfx_v12_1_gpu_early_init(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 1, 0):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	default:
		BUG();
		break;
	}

	return 0;
}

static int gfx_v12_1_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				       int xcc_id, int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;
	uint32_t xcc_doorbell_start;

	ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
				       ring_id];

	/* mec0 is me1 */
	ring->xcc_id = xcc_id;
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
			     xcc_id * adev->doorbell_index.xcc_doorbell_range;
	ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
			     (ring_id + xcc_id * adev->gfx.num_compute_rings) *
			     GFX12_MEC_HPD_SIZE;
	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
	sprintf(ring->name, "comp_%d.%d.%d.%d",
			ring->xcc_id, ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     hw_prio, NULL);
	if (r)
		return r;

	return 0;
}

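/* Firmware layout info parsed from the RLC table of contents, indexed by
 * firmware id; used to place ucode images in the autoload buffer.
 */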
static struct {
	SOC24_FIRMWARE_ID	id;
	unsigned int		offset;
	unsigned int		size;
	unsigned int		size_x16;
	unsigned int		num_inst;
} rlc_autoload_info[SOC24_FIRMWARE_ID_MAX];

#define RLC_TOC_OFFSET_DWUNIT   8
#define RLC_SIZE_MULTIPLE       1024
#define RLC_TOC_UMF_SIZE_inM	23ULL
#define RLC_TOC_FORMAT_API	165ULL

#define RLC_NUM_INS_CODE0   1
#define RLC_NUM_INS_CODE1   8
#define RLC_NUM_INS_CODE2   2
#define RLC_NUM_INS_CODE3   16

static void gfx_v12_1_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
{
	RLC_TABLE_OF_CONTENT_V2 *ucode = rlc_toc;

	while (ucode && (ucode->id > SOC24_FIRMWARE_ID_INVALID)) {
		rlc_autoload_info[ucode->id].id = ucode->id;
		rlc_autoload_info[ucode->id].offset =
			ucode->offset * RLC_TOC_OFFSET_DWUNIT * 4;
		rlc_autoload_info[ucode->id].size =
			ucode->size_x16 ? ucode->size * RLC_SIZE_MULTIPLE * 4 :
					  ucode->size * 4;
		switch (ucode->vfflr_image_code) {
		case 0:
			rlc_autoload_info[ucode->id].num_inst =
				RLC_NUM_INS_CODE0;
			break;
		case 1:
			rlc_autoload_info[ucode->id].num_inst =
				RLC_NUM_INS_CODE1;
			break;
		case 2:
			rlc_autoload_info[ucode->id].num_inst =
				RLC_NUM_INS_CODE2;
			break;
		case 3:
			rlc_autoload_info[ucode->id].num_inst =
				RLC_NUM_INS_CODE3;
			break;
		default:
			dev_err(adev->dev,
				"Invalid instance number detected\n");
			break;
		}
		ucode++;
	}
}

static uint32_t gfx_v12_1_calc_toc_total_size(struct amdgpu_device *adev)
{
	uint32_t total_size = 0;
	SOC24_FIRMWARE_ID id;

	gfx_v12_1_parse_rlc_toc(adev, adev->psp.toc.start_addr);

	for (id = SOC24_FIRMWARE_ID_RLC_G_UCODE; id < SOC24_FIRMWARE_ID_MAX; id++)
		total_size += rlc_autoload_info[id].size;

	/* The TOC may place entries at aligned (padded) offsets, so also
	 * make sure the buffer covers the last entry in full.
	 */
	if (total_size < rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].offset)
		total_size = rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].offset +
			rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].size;
	if (total_size < (RLC_TOC_UMF_SIZE_inM << 20))
		total_size = RLC_TOC_UMF_SIZE_inM << 20;

	return total_size;
}

static int gfx_v12_1_rlc_autoload_buffer_init(struct amdgpu_device *adev)
{
	int r;
	uint32_t total_size;

	total_size = gfx_v12_1_calc_toc_total_size(adev);

	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.rlc.rlc_autoload_bo,
				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);

	if (r) {
		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
		return r;
	}

	return 0;
}

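/* Copy a firmware image into its TOC slot in the autoload buffer. Images
 * with multiple instances are replicated for each XCC present, and any
 * unused tail of a slot is cleared.
 */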
static void gfx_v12_1_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
						       SOC24_FIRMWARE_ID id,
						       const void *fw_data,
						       uint32_t fw_size)
{
	uint32_t toc_offset;
	uint32_t toc_fw_size, toc_fw_inst_size;
	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;
	int i, num_inst;

	if (id <= SOC24_FIRMWARE_ID_INVALID || id >= SOC24_FIRMWARE_ID_MAX)
		return;

	toc_offset = rlc_autoload_info[id].offset;
	toc_fw_size = rlc_autoload_info[id].size;
	num_inst = rlc_autoload_info[id].num_inst;
	toc_fw_inst_size = toc_fw_size / num_inst;

	if (fw_size == 0)
		fw_size = toc_fw_inst_size;

	if (fw_size > toc_fw_inst_size)
		fw_size = toc_fw_inst_size;

	for (i = 0; i < num_inst; i++) {
		if ((num_inst == RLC_NUM_INS_CODE0) ||
		    ((1 << (i / 2)) & adev->gfx.xcc_mask)) {
			memcpy(ptr + toc_offset + i * toc_fw_inst_size, fw_data, fw_size);

			if (fw_size < toc_fw_inst_size)
				memset(ptr + toc_offset + fw_size + i * toc_fw_inst_size,
				       0, toc_fw_inst_size - fw_size);
		}
	}
}

static void
gfx_v12_1_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev)
{
	void *data;
	uint32_t size;
	uint32_t *toc_ptr;

	data = adev->psp.toc.start_addr;
	size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_TOC].size;

	toc_ptr = (uint32_t *)data + size / 4 - 2;
	*toc_ptr = (RLC_TOC_FORMAT_API << 24) | 0x1;

	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_TOC,
						   data, size);
}

static void
gfx_v12_1_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	const struct rlc_firmware_header_v2_1 *rlcv21_hdr;
	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
	uint16_t version_major, version_minor;

	/* mec ucode */
	cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.mec_fw->data;
	/* instruction */
	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
		le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
	fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC,
						   fw_data, fw_size);
	/* data */
	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
		le32_to_cpu(cpv2_hdr->data_offset_bytes));
	fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P0_STACK,
						   fw_data, fw_size);
	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P1_STACK,
						   fw_data, fw_size);
	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P2_STACK,
						   fw_data, fw_size);
	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P3_STACK,
						   fw_data, fw_size);

	/* rlc ucode */
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
		adev->gfx.rlc_fw->data;
	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_G_UCODE,
						   fw_data, fw_size);

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2) {
		if (version_minor >= 1) {
			rlcv21_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_offset_bytes));
			fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_size_bytes);
			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLCG_SCRATCH,
						   fw_data, fw_size);

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv21_hdr->save_restore_list_srm_offset_bytes));
			fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_srm_size_bytes);
			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_SRM_ARAM,
						   fw_data, fw_size);
		}
		if (version_minor >= 2) {
			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_UCODE,
						   fw_data, fw_size);

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_DRAM_BOOT,
						   fw_data, fw_size);
		}
	}
}

static void
gfx_v12_1_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct sdma_firmware_header_v3_0 *sdma_hdr;

	if (adev->sdma.instance[0].fw) {
		sdma_hdr = (const struct sdma_firmware_header_v3_0 *)
			adev->sdma.instance[0].fw->data;
		fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
				le32_to_cpu(sdma_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(sdma_hdr->ucode_size_bytes);

		gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_SDMA_UCODE_TH0,
							   fw_data, fw_size);
	}
}

static void
gfx_v12_1_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	unsigned fw_size;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	int pipe, ucode_id, data_id;

	for (pipe = 0; pipe < 2; pipe++) {
		if (pipe == 0) {
			ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P0;
			data_id  = SOC24_FIRMWARE_ID_RS64_MES_P0_STACK;
		} else {
			ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P1;
			data_id  = SOC24_FIRMWARE_ID_RS64_MES_P1_STACK;
		}

		mes_hdr = (const struct mes_firmware_header_v1_0 *)
			adev->mes.fw[pipe]->data;

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);

		gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, ucode_id, fw_data, fw_size);

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);

		gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, data_id, fw_data, fw_size);
	}
}

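/* Stage all firmware in the autoload buffer, point every XCC's IMU
 * bootloader registers at the RLC-G image, load the IMU firmware and
 * finally unhalt the RLC to kick off the autoload.
 */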
static int gfx_v12_1_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
{
	uint32_t rlc_g_offset, rlc_g_size;
	uint64_t gpu_addr;
	uint32_t data;
	int i, num_xcc;

	/* RLC autoload sequence 2: copy ucode */
	gfx_v12_1_rlc_backdoor_autoload_copy_sdma_ucode(adev);
	gfx_v12_1_rlc_backdoor_autoload_copy_gfx_ucode(adev);
	gfx_v12_1_rlc_backdoor_autoload_copy_mes_ucode(adev);
	gfx_v12_1_rlc_backdoor_autoload_copy_toc_ucode(adev);

	rlc_g_offset = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].offset;
	rlc_g_size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].size;
	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset - adev->gmc.vram_start;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regGFX_IMU_RLC_BOOTLOADER_ADDR_HI,
			     upper_32_bits(gpu_addr));
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regGFX_IMU_RLC_BOOTLOADER_ADDR_LO,
			     lower_32_bits(gpu_addr));
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regGFX_IMU_RLC_BOOTLOADER_SIZE,
			     rlc_g_size);
	}

	if (adev->gfx.imu.funcs) {
		/* RLC autoload sequence 3: load IMU fw */
		if (adev->gfx.imu.funcs->load_microcode)
			adev->gfx.imu.funcs->load_microcode(adev);
	}

	/* unhalt rlc to start autoload */
	for (i = 0; i < num_xcc; i++) {
		data = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_THREAD_ENABLE);
		data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD0_ENABLE, 1);
		data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1);
		WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_THREAD_ENABLE, data);
		WREG32_SOC15(GC, GET_INST(GC, i), regRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
	}

	return 0;
}

static int gfx_v12_1_sw_init(struct amdgpu_ip_block *ip_block)
{
	int i, j, k, r, ring_id = 0;
	unsigned num_compute_rings;
	int xcc_id, num_xcc;
	struct amdgpu_device *adev = ip_block->adev;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 1, 0):
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	default:
		adev->gfx.mec.num_mec = 2;
		adev->gfx.mec.num_pipe_per_mec = 2;
		adev->gfx.mec.num_queue_per_pipe = 4;
		break;
	}

	/* recalculate compute rings to use based on hardware configuration */
	num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
			     adev->gfx.mec.num_queue_per_pipe) / 2;
	adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
					  num_compute_rings);

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_GRBM_CP,
			      GFX_12_1_0__SRCID__CP_EOP_INTERRUPT,
			      &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_GRBM_CP,
			      GFX_12_1_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_GRBM_CP,
			      GFX_12_1_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	r = gfx_v12_1_rlc_init(adev);
	if (r) {
		dev_err(adev->dev, "Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v12_1_mec_init(adev);
	if (r) {
		dev_err(adev->dev, "Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		ring_id = 0;
		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
					if (!amdgpu_gfx_is_mec_queue_enabled(adev,
								xcc_id, i, k, j))
						continue;

					r = gfx_v12_1_compute_ring_init(adev, ring_id,
								xcc_id, i, k, j);
					if (r)
						return r;

					ring_id++;
				}
			}
		}

		if (!adev->enable_mes_kiq) {
			r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, xcc_id);
			if (r) {
				dev_err(adev->dev, "Failed to init KIQ BOs!\n");
				return r;
			}

			r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
			if (r)
				return r;
		}

		r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v12_1_compute_mqd), xcc_id);
		if (r)
			return r;
	}

	/* allocate visible FB for rlc auto-loading fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		r = gfx_v12_1_rlc_autoload_buffer_init(adev);
		if (r)
			return r;
	}

	r = gfx_v12_1_gpu_early_init(adev);
	if (r)
		return r;

	r = amdgpu_gfx_sysfs_init(adev);
	if (r)
		return r;

	return 0;
}

static void gfx_v12_1_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
			&adev->gfx.rlc.rlc_autoload_gpu_addr,
			(void **)&adev->gfx.rlc.rlc_autoload_ptr);
}

static int gfx_v12_1_sw_fini(struct amdgpu_ip_block *ip_block)
{
	int i, num_xcc;
	struct amdgpu_device *adev = ip_block->adev;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	for (i = 0; i < num_xcc; i++) {
		amdgpu_gfx_mqd_sw_fini(adev, i);

		if (!adev->enable_mes_kiq) {
			amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
			amdgpu_gfx_kiq_fini(adev, i);
		}
	}

	gfx_v12_1_rlc_fini(adev);
	gfx_v12_1_mec_fini(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		gfx_v12_1_rlc_autoload_buffer_fini(adev);

	gfx_v12_1_free_microcode(adev);

	return 0;
}

static void gfx_v12_1_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				       u32 sh_num, u32 instance, int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
				     instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);

	WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
}

static u32 gfx_v12_1_get_sa_active_bitmap(struct amdgpu_device *adev,
					  int xcc_id)
{
	u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;

	gc_disabled_sa_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SA_UNIT_DISABLE);
	gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
					    CC_GC_SA_UNIT_DISABLE,
					    SA_DISABLE);
	gc_user_disabled_sa_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SA_UNIT_DISABLE);
	gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
						 GC_USER_SA_UNIT_DISABLE,
						 SA_DISABLE);
	sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
					    adev->gfx.config.max_shader_engines);

	return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
}

static u32 gfx_v12_1_get_rb_active_bitmap(struct amdgpu_device *adev,
					  int xcc_id)
{
	u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
	u32 rb_mask;

	gc_disabled_rb_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
					   regCC_RB_BACKEND_DISABLE);
	gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
					    CC_RB_BACKEND_DISABLE,
					    BACKEND_DISABLE);
	gc_user_disabled_rb_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
						regGC_USER_RB_BACKEND_DISABLE);
	gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
						 GC_USER_RB_BACKEND_DISABLE,
						 BACKEND_DISABLE);
	rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
					    adev->gfx.config.max_shader_engines);

	return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
}

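/* Derive the enabled render backend mask from the active SA bitmap and
 * the RB harvesting registers of each XCC.
 */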
static void gfx_v12_1_setup_rb(struct amdgpu_device *adev)
{
	u32 rb_bitmap_width_per_sa;
	u32 max_sa;
	u32 active_sa_bitmap;
	u32 global_active_rb_bitmap;
	u32 active_rb_bitmap = 0;
	u32 i;
	int xcc_id;

	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
		/* query sa bitmap from SA_UNIT_DISABLE registers */
		active_sa_bitmap = gfx_v12_1_get_sa_active_bitmap(adev, xcc_id);
		/* query rb bitmap from RB_BACKEND_DISABLE registers */
		global_active_rb_bitmap = gfx_v12_1_get_rb_active_bitmap(adev, xcc_id);

		/* generate active rb bitmap according to active sa bitmap */
		max_sa = adev->gfx.config.max_shader_engines *
			 adev->gfx.config.max_sh_per_se;
		rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se;
		for (i = 0; i < max_sa; i++) {
			if (active_sa_bitmap & (1 << i))
				active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
		}

		/* only keep RBs that are not harvested */
		active_rb_bitmap &= global_active_rb_bitmap;
	}

	adev->gfx.config.backend_enable_mask = active_rb_bitmap;
	adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}

static void gfx_v12_1_xcc_init_compute_vmid(struct amdgpu_device *adev,
					    int xcc_id)
{
	int i;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:         0x20000000'00000000 - 0x20000001'00000000 (4GB)
	 * Scratch:     0x10000000'00000000 - 0x10000001'00000000 (4GB)
	 */
	sh_mem_bases = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
				     (adev->gmc.private_aperture_start >> 58));
	sh_mem_bases = REG_SET_FIELD(sh_mem_bases, SH_MEM_BASES, SHARED_BASE,
				     (adev->gmc.shared_aperture_start >> 48));

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc_v1_0_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);

		/* Disable VGPR deallocation instruction for each KFD vmid. */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_DEBUG);
		data = REG_SET_FIELD(data, SQ_DEBUG, DISABLE_VGPR_DEALLOC, 1);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_DEBUG, data);
	}
	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
	mutex_unlock(&adev->srbm_mutex);
}

static void gfx_v12_1_tcp_harvest(struct amdgpu_device *adev)
{
	/* TODO: harvest feature to be added later. */
}

static void gfx_v12_1_get_tcc_info(struct amdgpu_device *adev)
{
}

static void gfx_v12_1_xcc_constants_init(struct amdgpu_device *adev,
					 int xcc_id)
{
	u32 tmp;
	int i;

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
		soc_v1_0_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
			     regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		if (i != 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
				(adev->gmc.private_aperture_start >> 58));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
				(adev->gmc.shared_aperture_start >> 48));
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, tmp);
		}
	}
	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1468 
1469 	mutex_unlock(&adev->srbm_mutex);
1470 
1471 	gfx_v12_1_xcc_init_compute_vmid(adev, xcc_id);
1472 }
1473 
1474 static void gfx_v12_1_constants_init(struct amdgpu_device *adev)
1475 {
1476 	int i, num_xcc;
1477 
1478 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1479 
1480 	gfx_v12_1_setup_rb(adev);
1481 	gfx_v12_1_get_cu_info(adev, &adev->gfx.cu_info);
1482 	gfx_v12_1_get_tcc_info(adev);
1483 	adev->gfx.config.pa_sc_tile_steering_override = 0;
1484 
1485 	for (i = 0; i < num_xcc; i++)
1486 		gfx_v12_1_xcc_constants_init(adev, i);
1487 }
1488 
1489 static void gfx_v12_1_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1490 						    bool enable, int xcc_id)
1491 {
1492 	u32 tmp;
1493 
1494 	if (amdgpu_sriov_vf(adev))
1495 		return;
1496 
1497 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);
1498 
1499 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
1500 			    enable ? 1 : 0);
1501 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
1502 			    enable ? 1 : 0);
1503 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
1504 			    enable ? 1 : 0);
1505 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
1506 			    enable ? 1 : 0);
1507 
1508 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
1509 }
1510 
1511 static int gfx_v12_1_xcc_init_csb(struct amdgpu_device *adev,
1512 				  int xcc_id)
1513 {
1514 	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
1515 
1516 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CSIB_ADDR_HI,
1517 			adev->gfx.rlc.clear_state_gpu_addr >> 32);
1518 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CSIB_ADDR_LO,
1519 			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
1520 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1521 		     regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
1522 
1523 	return 0;
1524 }
1525 
1526 static void gfx_v12_1_xcc_rlc_stop(struct amdgpu_device *adev,
1527 				   int xcc_id)
1528 {
1529 	u32 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CNTL);
1530 
1531 	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
1532 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CNTL, tmp);
1533 }
1534 
1535 static void gfx_v12_1_rlc_stop(struct amdgpu_device *adev)
1536 {
1537 	int i, num_xcc;
1538 
1539 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1540 	for (i = 0; i < num_xcc; i++)
1541 		gfx_v12_1_xcc_rlc_stop(adev, i);
1542 }
1543 
1544 static void gfx_v12_1_xcc_rlc_reset(struct amdgpu_device *adev,
1545 				    int xcc_id)
1546 {
1547 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id),
1548 			      GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
1549 	udelay(50);
1550 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id),
1551 			      GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
1552 	udelay(50);
1553 }
1554 
1555 static void gfx_v12_1_rlc_reset(struct amdgpu_device *adev)
1556 {
1557 	int i, num_xcc;
1558 
1559 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1560 	for (i = 0; i < num_xcc; i++)
1561 		gfx_v12_1_xcc_rlc_reset(adev, i);
1562 }
1563 
1564 static void gfx_v12_1_xcc_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
1565 						 bool enable, int xcc_id)
1566 {
1567 	uint32_t rlc_pg_cntl;
1568 
1569 	rlc_pg_cntl = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_PG_CNTL);
1570 
1571 	if (!enable) {
1572 		/* RLC_PG_CNTL[23] = 0 (default):
1573 		 * RLC waits for handshake acks from the SMU,
1574 		 * so GFXOFF can be enabled.
1575 		 * RLC_PG_CNTL[23] = 1:
1576 		 * RLC issues no messages to the SMU, hence no
1577 		 * SMU/RLC handshake and GFXOFF is disabled.
1578 		 */
1580 		rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
1581 	} else {
1582 		rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
	}
1583 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_PG_CNTL, rlc_pg_cntl);
1584 }
1585 
1586 static void gfx_v12_1_xcc_rlc_start(struct amdgpu_device *adev,
1587 				    int xcc_id)
1588 {
1589 	/* TODO: keep the RLC/SMU handshake disabled until the SMU
1590 	 * and GFXOFF features work as expected */
1591 	if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
1592 		gfx_v12_1_xcc_rlc_smu_handshake_cntl(adev, false, xcc_id);
1593 
1594 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL, RLC_ENABLE_F32, 1);
1595 	udelay(50);
1596 }
1597 
1598 static void gfx_v12_1_rlc_start(struct amdgpu_device *adev)
1599 {
1600 	int i, num_xcc;
1601 
1602 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1603 	for (i = 0; i < num_xcc; i++)
1604 		gfx_v12_1_xcc_rlc_start(adev, i);
1606 }
1607 
1608 static void gfx_v12_1_xcc_rlc_enable_srm(struct amdgpu_device *adev,
1609 					 int xcc_id)
1610 {
1611 	uint32_t tmp;
1612 
1613 	/* enable Save Restore Machine */
1614 	tmp = RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SRM_CNTL));
1615 	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1616 	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
1617 	WREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SRM_CNTL), tmp);
1618 }
1619 
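/*
 * Legacy (non-PSP) load of the RLCG F32 microcode: program the start
 * offset once, stream the image dwords through RLC_GPM_UCODE_DATA
 * (the hardware is expected to auto-increment the address), and finish
 * by writing the firmware version to RLC_GPM_UCODE_ADDR as the
 * end-of-load marker.
 */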
1620 static void gfx_v12_1_xcc_load_rlcg_microcode(struct amdgpu_device *adev,
1621 					      int xcc_id)
1622 {
1623 	const struct rlc_firmware_header_v2_0 *hdr;
1624 	const __le32 *fw_data;
1625 	unsigned i, fw_size;
1626 
1627 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1628 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1629 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1630 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1631 
1632 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
1633 		     RLCG_UCODE_LOADING_START_ADDRESS);
1634 
1635 	for (i = 0; i < fw_size; i++)
1636 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1637 			     regRLC_GPM_UCODE_DATA,
1638 			     le32_to_cpup(fw_data++));
1639 
1640 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1641 		     regRLC_GPM_UCODE_ADDR,
1642 		     adev->gfx.rlc_fw_version);
1643 }
1644 
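/*
 * Load the RLC LX6 IRAM and DRAM images with the same address/data
 * streaming scheme, then release the LX6 core from reset (BRESET = 0)
 * with PDEBUG enabled.  Emulation mode inserts short sleeps,
 * presumably so the register writes don't outrun the model.
 */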
1645 static void gfx_v12_1_xcc_load_rlc_iram_dram_microcode(struct amdgpu_device *adev,
1646 						       int xcc_id)
1647 {
1648 	const struct rlc_firmware_header_v2_2 *hdr;
1649 	const __le32 *fw_data;
1650 	unsigned i, fw_size;
1651 	u32 tmp;
1652 
1653 	hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
1654 
1655 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1656 			le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes));
1657 	fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4;
1658 
1659 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_LX6_IRAM_ADDR, 0);
1660 
1661 	for (i = 0; i < fw_size; i++) {
1662 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
1663 			msleep(1);
1664 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1665 			     regRLC_LX6_IRAM_DATA,
1666 			     le32_to_cpup(fw_data++));
1667 	}
1668 
1669 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1670 		     regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
1671 
1672 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1673 			le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes));
1674 	fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4;
1675 
1676 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1677 		     regRLC_LX6_DRAM_ADDR, 0);
1678 	for (i = 0; i < fw_size; i++) {
1679 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
1680 			msleep(1);
1681 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1682 			     regRLC_LX6_DRAM_DATA,
1683 			     le32_to_cpup(fw_data++));
1684 	}
1685 
1686 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1687 		     regRLC_LX6_DRAM_ADDR, adev->gfx.rlc_fw_version);
1688 
1689 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_LX6_CNTL);
1690 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1);
1691 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0);
1692 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_LX6_CNTL, tmp);
1693 }
1694 
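/*
 * Only header_version_major == 2 is handled here; a minor version of
 * 2 or above carries the additional LX6 IRAM/DRAM images, which are
 * loaded only when dpm is enabled.
 */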
1695 static int gfx_v12_1_xcc_rlc_load_microcode(struct amdgpu_device *adev,
1696 					    int xcc_id)
1697 {
1698 	const struct rlc_firmware_header_v2_0 *hdr;
1699 	uint16_t version_major;
1700 	uint16_t version_minor;
1701 
1702 	if (!adev->gfx.rlc_fw)
1703 		return -EINVAL;
1704 
1705 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1706 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
1707 
1708 	version_major = le16_to_cpu(hdr->header.header_version_major);
1709 	version_minor = le16_to_cpu(hdr->header.header_version_minor);
1710 
1711 	if (version_major == 2) {
1712 		gfx_v12_1_xcc_load_rlcg_microcode(adev, xcc_id);
1713 		if (amdgpu_dpm == 1 && version_minor >= 2)
1714 			gfx_v12_1_xcc_load_rlc_iram_dram_microcode(adev, xcc_id);
1717 
1718 		return 0;
1719 	}
1720 
1721 	return -EINVAL;
1722 }
1723 
1724 static int gfx_v12_1_xcc_rlc_resume(struct amdgpu_device *adev,
1725 				    int xcc_id)
1726 {
1727 	int r;
1728 
1729 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1730 		gfx_v12_1_xcc_init_csb(adev, xcc_id);
1731 
1732 		if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
1733 			gfx_v12_1_xcc_rlc_enable_srm(adev, xcc_id);
1734 	} else {
1735 		if (amdgpu_sriov_vf(adev)) {
1736 			gfx_v12_1_xcc_init_csb(adev, xcc_id);
1737 			return 0;
1738 		}
1739 
1740 		gfx_v12_1_xcc_rlc_stop(adev, xcc_id);
1741 
1742 		/* disable CG */
1743 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
1744 
1745 		/* disable PG */
1746 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_PG_CNTL, 0);
1747 
1748 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1749 			/* legacy rlc firmware loading */
1750 			r = gfx_v12_1_xcc_rlc_load_microcode(adev, xcc_id);
1751 			if (r)
1752 				return r;
1753 		}
1754 
1755 		gfx_v12_1_xcc_init_csb(adev, xcc_id);
1756 
1757 		gfx_v12_1_xcc_rlc_start(adev, xcc_id);
1758 	}
1759 
1760 	return 0;
1761 }
1762 
1763 static int gfx_v12_1_rlc_resume(struct amdgpu_device *adev)
1764 {
1765 	int r, i, num_xcc;
1766 
1767 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1768 	for (i = 0; i < num_xcc; i++) {
1769 		r = gfx_v12_1_xcc_rlc_resume(adev, i);
1770 		if (r)
1771 			return r;
1772 	}
1773 
1774 	return 0;
1775 }
1776 
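/*
 * Program the RS64 MEC entry point into each pipe and pulse the
 * per-pipe reset bits so the engines restart from it.  The start
 * address is pre-shifted by 2 and split across the CNTR_START and
 * CNTR_START_HI registers.
 */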
1777 static void gfx_v12_1_xcc_config_gfx_rs64(struct amdgpu_device *adev,
1778 					  int xcc_id)
1779 {
1780 	const struct gfx_firmware_header_v2_0 *mec_hdr;
1781 	uint32_t pipe_id, tmp;
1782 
1783 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)
1784 		adev->gfx.mec_fw->data;
1785 
1786 	/* config mec program start addr */
1787 	for (pipe_id = 0; pipe_id < 4; pipe_id++) {
1788 		soc_v1_0_grbm_select(adev, 1, pipe_id, 0, 0, GET_INST(GC, xcc_id));
1789 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START,
1790 					mec_hdr->ucode_start_addr_lo >> 2 |
1791 					mec_hdr->ucode_start_addr_hi << 30);
1792 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START_HI,
1793 					mec_hdr->ucode_start_addr_hi >> 2);
1794 	}
1795 	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1796 
1797 	/* reset mec pipe */
1798 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL);
1799 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
1800 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
1801 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
1802 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
1803 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL, tmp);
1804 
1805 	/* clear mec pipe reset */
1806 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
1807 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
1808 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
1809 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
1810 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL, tmp);
1811 }
1812 
1813 static void gfx_v12_1_config_gfx_rs64(struct amdgpu_device *adev)
1814 {
1815 	int i, num_xcc;
1816 
1817 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1818 
1819 	for (i = 0; i < num_xcc; i++)
1820 		gfx_v12_1_xcc_config_gfx_rs64(adev, i);
1821 }
1822 
1823 static void gfx_v12_1_xcc_set_mec_ucode_start_addr(struct amdgpu_device *adev,
1824 						   int xcc_id)
1825 {
1826 	const struct gfx_firmware_header_v2_0 *cp_hdr;
1827 	unsigned pipe_id;
1828 
1829 	cp_hdr = (const struct gfx_firmware_header_v2_0 *)
1830 		adev->gfx.mec_fw->data;
1831 	mutex_lock(&adev->srbm_mutex);
1832 	for (pipe_id = 0; pipe_id < adev->gfx.mec.num_pipe_per_mec; pipe_id++) {
1833 		soc_v1_0_grbm_select(adev, 1, pipe_id, 0, 0, GET_INST(GC, xcc_id));
1834 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START,
1835 			     cp_hdr->ucode_start_addr_lo >> 2 |
1836 			     cp_hdr->ucode_start_addr_hi << 30);
1837 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START_HI,
1838 			     cp_hdr->ucode_start_addr_hi >> 2);
1839 	}
1840 	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1841 	mutex_unlock(&adev->srbm_mutex);
1842 }
1843 
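/*
 * Poll until the CP reports idle and the RLC sets BOOTLOAD_COMPLETE.
 * With backdoor autoload the MEC start address still has to be
 * reprogrammed by the driver afterwards.
 */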
1844 static int gfx_v12_1_xcc_wait_for_rlc_autoload_complete(struct amdgpu_device *adev,
1845 							int xcc_id)
1846 {
1847 	uint32_t cp_status;
1848 	uint32_t bootload_status;
1849 	int i;
1850 
1851 	for (i = 0; i < adev->usec_timeout; i++) {
1852 		cp_status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_STAT);
1853 		bootload_status = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
1854 					       regRLC_RLCS_BOOTLOAD_STATUS);
1855 
1856 		if ((cp_status == 0) &&
1857 		    (REG_GET_FIELD(bootload_status,
1858 			RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
1859 			break;
1860 		}
1861 		udelay(1);
1862 		if (amdgpu_emu_mode)
1863 			msleep(10);
1864 	}
1865 
1866 	if (i >= adev->usec_timeout) {
1867 		dev_err(adev->dev,
1868 			"rlc autoload: xcc%d gc ucode autoload timeout\n", xcc_id);
1869 		return -ETIMEDOUT;
1870 	}
1871 
1872 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
1873 		gfx_v12_1_xcc_set_mec_ucode_start_addr(adev, xcc_id);
1875 
1876 	return 0;
1877 }
1878 
1879 static int gfx_v12_1_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
1880 {
1881 	int r, xcc_id;
1882 
1883 	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
1884 		r = gfx_v12_1_xcc_wait_for_rlc_autoload_complete(adev, xcc_id);
		if (r)
			return r;
	}
1885 
1886 	return 0;
1887 }
1888 
1889 static void gfx_v12_1_xcc_cp_compute_enable(struct amdgpu_device *adev,
1890 					    bool enable, int xcc_id)
1891 {
1892 	u32 data;
1893 
1894 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL);
1895 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE,
1896 						 enable ? 0 : 1);
1897 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET,
1898 						 enable ? 0 : 1);
1899 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET,
1900 						 enable ? 0 : 1);
1901 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET,
1902 						 enable ? 0 : 1);
1903 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET,
1904 						 enable ? 0 : 1);
1905 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE,
1906 						 enable ? 1 : 0);
1907 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE,
1908 						 enable ? 1 : 0);
1909 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE,
1910 						 enable ? 1 : 0);
1911 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE,
1912 						 enable ? 1 : 0);
1913 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT,
1914 						 enable ? 0 : 1);
1915 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL, data);
1916 
1917 	adev->gfx.kiq[xcc_id].ring.sched.ready = enable;
1918 
1919 	udelay(50);
1920 }
1921 
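/*
 * Legacy load of the RS64 MEC firmware.  The instruction image is
 * shared, so a single ucode BO is enough, while every pipe on every
 * XCC gets its own 64K-aligned copy of the data image since MDBASE
 * is programmed per pipe.  Both MEC caches are invalidated before
 * the start address is set.
 */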
1922 static int gfx_v12_1_xcc_cp_compute_load_microcode_rs64(struct amdgpu_device *adev,
1923 							uint16_t xcc_mask)
1924 {
1925 	const struct gfx_firmware_header_v2_0 *mec_hdr;
1926 	const __le32 *fw_ucode, *fw_data;
1927 	u32 tmp, fw_ucode_size, fw_data_size;
1928 	u32 i, usec_timeout = 50000; /* Wait for 50 ms */
1929 	u32 *fw_ucode_ptr, *fw_data_ptr;
1930 	int r, xcc_id;
1931 
1932 	if (!adev->gfx.mec_fw)
1933 		return -EINVAL;
1934 
1935 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
1936 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
1937 
1938 	fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data +
1939 				le32_to_cpu(mec_hdr->ucode_offset_bytes));
1940 	fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes);
1941 
1942 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1943 				le32_to_cpu(mec_hdr->data_offset_bytes));
1944 	fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
1945 
1946 	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
1947 				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
1948 				      &adev->gfx.mec.mec_fw_obj,
1949 				      &adev->gfx.mec.mec_fw_gpu_addr,
1950 				      (void **)&fw_ucode_ptr);
1951 	if (r) {
1952 		dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
1953 		gfx_v12_1_mec_fini(adev);
1954 		return r;
1955 	}
1956 
1957 	r = amdgpu_bo_create_reserved(adev,
1958 				      ALIGN(fw_data_size, 64 * 1024) *
1959 				      adev->gfx.mec.num_pipe_per_mec * NUM_XCC(xcc_mask),
1960 				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
1961 				      &adev->gfx.mec.mec_fw_data_obj,
1962 				      &adev->gfx.mec.mec_fw_data_gpu_addr,
1963 				      (void **)&fw_data_ptr);
1964 	if (r) {
1965 		dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r);
1966 		gfx_v12_1_mec_fini(adev);
1967 		return r;
1968 	}
1969 
1970 	memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
1971 	for (xcc_id = 0; xcc_id < NUM_XCC(xcc_mask); xcc_id++) {
1972 		for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
1973 			u32 offset = (xcc_id * adev->gfx.mec.num_pipe_per_mec + i) *
1974 				     ALIGN(fw_data_size, 64 * 1024) / 4;
1975 			memcpy(fw_data_ptr + offset, fw_data, fw_data_size);
1976 		}
1977 	}
1978 
1979 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
1980 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
1981 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
1982 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);
1983 
1984 	for (xcc_id = 0; xcc_id < NUM_XCC(xcc_mask); xcc_id++) {
1985 		gfx_v12_1_xcc_cp_compute_enable(adev, false, xcc_id);
1986 
1987 		tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL);
1988 		tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
1989 		tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
1990 		tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
1991 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp);
1992 
1993 		tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_BASE_CNTL);
1994 		tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
1995 		tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
1996 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_BASE_CNTL, tmp);
1997 
1998 		mutex_lock(&adev->srbm_mutex);
1999 		for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
2000 			soc_v1_0_grbm_select(adev, 1, i, 0, 0, GET_INST(GC, xcc_id));
2001 
2002 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_MDBASE_LO,
2003 					lower_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr +
2004 									(xcc_id * adev->gfx.mec.num_pipe_per_mec + i) *
2005 									ALIGN(fw_data_size, 64 * 1024)));
2006 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_MDBASE_HI,
2007 					upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr +
2008 									(xcc_id * adev->gfx.mec.num_pipe_per_mec + i) *
2009 									ALIGN(fw_data_size, 64 * 1024)));
2010 
2011 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO,
2012 					lower_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2013 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI,
2014 					upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2015 		}
2016 		soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2017 		mutex_unlock(&adev->srbm_mutex);
2018 
2019 		/* Trigger an invalidation of the MEC data cache */
2020 		tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_OP_CNTL);
2021 		tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2022 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_OP_CNTL, tmp);
2023 
2024 		/* Wait for invalidation complete */
2025 		for (i = 0; i < usec_timeout; i++) {
2026 			tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_OP_CNTL);
2027 			if (REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
2028 					  INVALIDATE_DCACHE_COMPLETE) == 1)
2029 				break;
2030 			udelay(1);
2031 		}
2032 
2033 		if (i >= usec_timeout) {
2034 			dev_err(adev->dev, "failed to invalidate MEC data cache\n");
2035 			return -EINVAL;
2036 		}
2037 
2038 		/* Trigger an invalidation of the L1 instruction caches */
2039 		tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_OP_CNTL);
2040 		tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2041 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_OP_CNTL, tmp);
2042 
2043 		/* Wait for invalidation complete */
2044 		for (i = 0; i < usec_timeout; i++) {
2045 			tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_OP_CNTL);
2046 			if (REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2047 					  INVALIDATE_CACHE_COMPLETE) == 1)
2048 				break;
2049 			udelay(1);
2050 		}
2051 
2052 		if (i >= usec_timeout) {
2053 			dev_err(adev->dev, "failed to invalidate instruction cache\n");
2054 			return -EINVAL;
2055 		}
2056 
2057 		gfx_v12_1_xcc_set_mec_ucode_start_addr(adev, xcc_id);
2058 	}
2059 
2060 	return 0;
2061 }
2062 
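/*
 * Tell the RLC scheduler where the KIQ lives: the low byte of
 * RLC_CP_SCHEDULERS encodes me/pipe/queue, and bit 7, set by the
 * second write, appears to activate the entry.
 */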
2063 static void gfx_v12_1_xcc_kiq_setting(struct amdgpu_ring *ring,
2064 				      int xcc_id)
2065 {
2066 	uint32_t tmp;
2067 	struct amdgpu_device *adev = ring->adev;
2068 
2069 	/* tell the RLC which queue is the KIQ */
2070 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
2071 	tmp &= 0xffffff00;
2072 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2073 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
2074 	tmp |= 0x80;
2075 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
2076 }
2077 
2078 static void gfx_v12_1_xcc_cp_set_doorbell_range(struct amdgpu_device *adev,
2079 						int xcc_id)
2080 {
2081 	/* disable gfx engine doorbell range */
2082 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_DOORBELL_RANGE_LOWER, 0);
2083 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_DOORBELL_RANGE_UPPER, 0);
2084 
2085 	/* set compute engine doorbell range */
2086 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_LOWER,
2087 		     ((adev->doorbell_index.kiq +
2088 		       xcc_id * adev->doorbell_index.xcc_doorbell_range) *
2089 		      2) << 2);
2090 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_UPPER,
2091 		     ((adev->doorbell_index.userqueue_end +
2092 		       xcc_id * adev->doorbell_index.xcc_doorbell_range) *
2093 		      2) << 2);
2094 }
2095 
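/*
 * Fill a compute memory queue descriptor (MQD).  The fields mirror
 * the CP_HQD_ and CP_MQD_ register layout; whoever maps the queue
 * (KIQ, MES or the direct register path below) commits these values
 * to the selected HQD.
 */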
2096 static int gfx_v12_1_compute_mqd_init(struct amdgpu_device *adev, void *m,
2097 				      struct amdgpu_mqd_prop *prop)
2098 {
2099 	struct v12_1_compute_mqd *mqd = m;
2100 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
2101 	uint32_t tmp;
2102 
2103 	mqd->header = 0xC0310800;
2104 	mqd->compute_pipelinestat_enable = 0x00000001;
2105 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2106 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2107 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2108 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2109 	mqd->compute_misc_reserved = 0x00000007;
2110 
2111 	eop_base_addr = prop->eop_gpu_addr >> 8;
2112 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2113 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
2114 
2115 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2116 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_EOP_CONTROL);
2117 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
2118 			(order_base_2(GFX12_MEC_HPD_SIZE / 4) - 1));
2119 
2120 	mqd->cp_hqd_eop_control = tmp;
2121 
2122 	/* enable doorbell? */
2123 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_DOORBELL_CONTROL);
2124 
2125 	if (prop->use_doorbell) {
2126 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2127 				    DOORBELL_OFFSET, prop->doorbell_index);
2128 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2129 				    DOORBELL_EN, 1);
2130 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2131 				    DOORBELL_SOURCE, 0);
2132 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2133 				    DOORBELL_HIT, 0);
2134 	} else {
2135 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2136 				    DOORBELL_EN, 0);
2137 	}
2138 
2139 	mqd->cp_hqd_pq_doorbell_control = tmp;
2140 
2141 	/* disable the queue if it's active */
2142 	mqd->cp_hqd_dequeue_request = 0;
2143 	mqd->cp_hqd_pq_rptr = 0;
2144 	mqd->cp_hqd_pq_wptr_lo = 0;
2145 	mqd->cp_hqd_pq_wptr_hi = 0;
2146 
2147 	/* set the pointer to the MQD */
2148 	mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc;
2149 	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
2150 
2151 	/* set MQD vmid to 0 */
2152 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_MQD_CONTROL);
2153 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
2154 	mqd->cp_mqd_control = tmp;
2155 
2156 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2157 	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
2158 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2159 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2160 
2161 	/* set up the HQD, this is similar to CP_RB0_CNTL */
2162 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_CONTROL);
2163 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
2164 			    (order_base_2(prop->queue_size / 4) - 1));
2165 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
2166 			    (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
2167 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
2168 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
2169 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
2170 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
2171 	mqd->cp_hqd_pq_control = tmp;
2172 
2173 	/* set the wb address whether it's enabled or not */
2174 	wb_gpu_addr = prop->rptr_gpu_addr;
2175 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2176 	mqd->cp_hqd_pq_rptr_report_addr_hi =
2177 		upper_32_bits(wb_gpu_addr) & 0xffff;
2178 
2179 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2180 	wb_gpu_addr = prop->wptr_gpu_addr;
2181 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2182 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2183 
2184 	tmp = 0;
2185 	/* enable the doorbell if requested */
2186 	if (prop->use_doorbell) {
2187 		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_DOORBELL_CONTROL);
2188 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2189 				DOORBELL_OFFSET, prop->doorbell_index);
2190 
2191 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2192 				    DOORBELL_EN, 1);
2193 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2194 				    DOORBELL_SOURCE, 0);
2195 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2196 				    DOORBELL_HIT, 0);
2197 	}
2198 
2199 	mqd->cp_hqd_pq_doorbell_control = tmp;
2200 
2201 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2202 	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_RPTR);
2203 
2204 	/* set the vmid for the queue */
2205 	mqd->cp_hqd_vmid = 0;
2206 
2207 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PERSISTENT_STATE);
2208 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x63);
2209 	mqd->cp_hqd_persistent_state = tmp;
2210 
2211 	/* set MIN_IB_AVAIL_SIZE */
2212 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_IB_CONTROL);
2213 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 1);
2214 	mqd->cp_hqd_ib_control = tmp;
2215 
2216 	/* set static priority for a compute queue/ring */
2217 	mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority;
2218 	mqd->cp_hqd_queue_priority = prop->hqd_queue_priority;
2219 
2220 	mqd->cp_hqd_active = prop->hqd_active;
2221 
2222 	return 0;
2223 }
2224 
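/*
 * Commit the KIQ MQD straight into the HQD registers.  Only the KIQ
 * is brought up this way; regular compute queues are mapped later
 * through KIQ/MES packets.  The caller selects the target
 * me/pipe/queue via GRBM under srbm_mutex.
 */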
2225 static int gfx_v12_1_xcc_kiq_init_register(struct amdgpu_ring *ring,
2226 					   int xcc_id)
2227 {
2228 	struct amdgpu_device *adev = ring->adev;
2229 	struct v12_1_compute_mqd *mqd = ring->mqd_ptr;
2230 	int j;
2231 
2232 	/* inactivate the queue */
2233 	if (amdgpu_sriov_vf(adev))
2234 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
2235 
2236 	/* disable wptr polling */
2237 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
2238 
2239 	/* write the EOP addr */
2240 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
2241 	       mqd->cp_hqd_eop_base_addr_lo);
2242 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
2243 	       mqd->cp_hqd_eop_base_addr_hi);
2244 
2245 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2246 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
2247 	       mqd->cp_hqd_eop_control);
2248 
2249 	/* enable doorbell? */
2250 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
2251 	       mqd->cp_hqd_pq_doorbell_control);
2252 
2253 	/* disable the queue if it's active */
2254 	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
2255 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
2256 		for (j = 0; j < adev->usec_timeout; j++) {
2257 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
2258 				break;
2259 			udelay(1);
2260 		}
2261 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
2262 		       mqd->cp_hqd_dequeue_request);
2263 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
2264 		       mqd->cp_hqd_pq_rptr);
2265 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
2266 		       mqd->cp_hqd_pq_wptr_lo);
2267 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
2268 		       mqd->cp_hqd_pq_wptr_hi);
2269 	}
2270 
2271 	/* set the pointer to the MQD */
2272 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
2273 	       mqd->cp_mqd_base_addr_lo);
2274 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
2275 	       mqd->cp_mqd_base_addr_hi);
2276 
2277 	/* set MQD vmid to 0 */
2278 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
2279 	       mqd->cp_mqd_control);
2280 
2281 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2282 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
2283 	       mqd->cp_hqd_pq_base_lo);
2284 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
2285 	       mqd->cp_hqd_pq_base_hi);
2286 
2287 	/* set up the HQD, this is similar to CP_RB0_CNTL */
2288 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
2289 	       mqd->cp_hqd_pq_control);
2290 
2291 	/* set the wb address whether it's enabled or not */
2292 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR,
2293 		mqd->cp_hqd_pq_rptr_report_addr_lo);
2294 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
2295 		mqd->cp_hqd_pq_rptr_report_addr_hi);
2296 
2297 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2298 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR,
2299 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
2300 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
2301 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
2302 
2303 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
2304 	       mqd->cp_hqd_pq_doorbell_control);
2305 
2306 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2307 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
2308 	       mqd->cp_hqd_pq_wptr_lo);
2309 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
2310 	       mqd->cp_hqd_pq_wptr_hi);
2311 
2312 	/* set the vmid for the queue */
2313 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid);
2314 
2315 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE,
2316 	       mqd->cp_hqd_persistent_state);
2317 
2318 	/* activate the queue */
2319 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE,
2320 	       mqd->cp_hqd_active);
2321 
2322 	if (ring->use_doorbell)
2323 		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2324 
2325 	return 0;
2326 }
2327 
2328 static int gfx_v12_1_xcc_kiq_init_queue(struct amdgpu_ring *ring,
2329 					int xcc_id)
2330 {
2331 	struct amdgpu_device *adev = ring->adev;
2332 	struct v12_1_compute_mqd *mqd = ring->mqd_ptr;
2333 
2334 	gfx_v12_1_xcc_kiq_setting(ring, xcc_id);
2335 
2336 	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
2337 		/* reset MQD to a clean status */
2338 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2339 			memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(*mqd));
2340 
2341 		/* reset ring buffer */
2342 		ring->wptr = 0;
2343 		amdgpu_ring_clear_ring(ring);
2344 
2345 		mutex_lock(&adev->srbm_mutex);
2346 		soc_v1_0_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2347 		gfx_v12_1_xcc_kiq_init_register(ring, xcc_id);
2348 		soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2349 		mutex_unlock(&adev->srbm_mutex);
2350 	} else {
2351 		memset((void *)mqd, 0, sizeof(*mqd));
2352 		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
2353 			amdgpu_ring_clear_ring(ring);
2354 		mutex_lock(&adev->srbm_mutex);
2355 		soc_v1_0_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2356 		amdgpu_ring_init_mqd(ring);
2357 		gfx_v12_1_xcc_kiq_init_register(ring, xcc_id);
2358 		soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2359 		mutex_unlock(&adev->srbm_mutex);
2360 
2361 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2362 			memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(*mqd));
2363 	}
2364 
2365 	return 0;
2366 }
2367 
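/*
 * A KCQ only gets its MQD initialized here; the actual mapping is
 * done by the KIQ afterwards.  On reset or resume the MQD backup is
 * restored instead and the ring state is cleared.
 */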
2368 static int gfx_v12_1_xcc_kcq_init_queue(struct amdgpu_ring *ring,
2369 					int xcc_id)
2370 {
2371 	struct amdgpu_device *adev = ring->adev;
2372 	struct v12_1_compute_mqd *mqd = ring->mqd_ptr;
2373 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
2374 
2375 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2376 		memset((void *)mqd, 0, sizeof(*mqd));
2377 		mutex_lock(&adev->srbm_mutex);
2378 		soc_v1_0_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2379 		amdgpu_ring_init_mqd(ring);
2380 		soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2381 		mutex_unlock(&adev->srbm_mutex);
2382 
2383 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2384 			memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
2385 	} else {
2386 		/* restore MQD to a clean status */
2387 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2388 			memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
2389 		/* reset ring buffer */
2390 		ring->wptr = 0;
2391 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
2392 		amdgpu_ring_clear_ring(ring);
2393 	}
2394 
2395 	return 0;
2396 }
2397 
2398 static int gfx_v12_1_xcc_kiq_resume(struct amdgpu_device *adev,
2399 				    int xcc_id)
2400 {
2401 	struct amdgpu_ring *ring;
2402 	int r;
2403 
2404 	ring = &adev->gfx.kiq[xcc_id].ring;
2405 
2406 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
2407 	if (unlikely(r != 0))
2408 		return r;
2409 
2410 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2411 	if (unlikely(r != 0)) {
2412 		amdgpu_bo_unreserve(ring->mqd_obj);
2413 		return r;
2414 	}
2415 
2416 	gfx_v12_1_xcc_kiq_init_queue(ring, xcc_id);
2417 	amdgpu_bo_kunmap(ring->mqd_obj);
2418 	ring->mqd_ptr = NULL;
2419 	amdgpu_bo_unreserve(ring->mqd_obj);
2420 	ring->sched.ready = true;
2421 	return 0;
2422 }
2423 
2424 static int gfx_v12_1_xcc_kcq_resume(struct amdgpu_device *adev,
2425 				    int xcc_id)
2426 {
2427 	struct amdgpu_ring *ring = NULL;
2428 	int r = 0, i;
2429 
2430 	if (!amdgpu_async_gfx_ring)
2431 		gfx_v12_1_xcc_cp_compute_enable(adev, true, xcc_id);
2432 
2433 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2434 		ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
2435 
2436 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
2437 		if (unlikely(r != 0))
2438 			goto done;
2439 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2440 		if (!r) {
2441 			r = gfx_v12_1_xcc_kcq_init_queue(ring, xcc_id);
2442 			amdgpu_bo_kunmap(ring->mqd_obj);
2443 			ring->mqd_ptr = NULL;
2444 		}
2445 		amdgpu_bo_unreserve(ring->mqd_obj);
2446 		if (r)
2447 			goto done;
2448 	}
2449 
2450 	r = amdgpu_gfx_enable_kcq(adev, xcc_id);
2451 done:
2452 	return r;
2453 }
2454 
2455 static int gfx_v12_1_xcc_cp_resume(struct amdgpu_device *adev, uint16_t xcc_mask)
2456 {
2457 	int r, i, xcc_id;
2458 	struct amdgpu_ring *ring;
2459 
2460 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
2461 		/* legacy firmware loading */
2462 		r = gfx_v12_1_xcc_cp_compute_load_microcode_rs64(adev, xcc_mask);
2463 		if (r)
2464 			return r;
2465 	}
2466 
2467 	for (xcc_id = 0; xcc_id < NUM_XCC(xcc_mask); xcc_id++) {
2468 		if (!(adev->flags & AMD_IS_APU))
2469 			gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
2470 
2471 		gfx_v12_1_xcc_cp_set_doorbell_range(adev, xcc_id);
2472 
2473 		gfx_v12_1_xcc_cp_compute_enable(adev, true, xcc_id);
2474 
2475 		if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
2476 			r = amdgpu_mes_kiq_hw_init(adev, xcc_id);
2477 		else
2478 			r = gfx_v12_1_xcc_kiq_resume(adev, xcc_id);
2479 		if (r)
2480 			return r;
2481 
2482 		r = gfx_v12_1_xcc_kcq_resume(adev, xcc_id);
2483 		if (r)
2484 			return r;
2485 
2486 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2487 			ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
2488 			r = amdgpu_ring_test_helper(ring);
2489 			if (r)
2490 				return r;
2491 		}
2492 	}
2493 
2494 	return 0;
2495 }
2496 
2497 static int gfx_v12_1_cp_resume(struct amdgpu_device *adev)
2498 {
2499 	int num_xcc, num_xcp, num_xcc_per_xcp;
2500 	int r = 0;
2501 
2502 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2503 	if (amdgpu_sriov_vf(adev)) {
2504 		enum amdgpu_gfx_partition mode;
2505 
2506 		mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2507 						       AMDGPU_XCP_FL_NONE);
2508 		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2509 			return -EINVAL;
2510 		if (adev->gfx.funcs &&
2511 		    adev->gfx.funcs->get_xccs_per_xcp) {
2512 			num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
2513 			adev->gfx.num_xcc_per_xcp = num_xcc_per_xcp;
2514 			num_xcp = num_xcc / num_xcc_per_xcp;
2515 		} else {
2516 			return -EINVAL;
2517 		}
2518 		r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);
2519 
2520 	} else {
2521 		if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2522 						    AMDGPU_XCP_FL_NONE) ==
2523 		    AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2524 			r = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr,
2525 							     amdgpu_user_partt_mode);
2526 	}
2527 
2528 	if (r)
2529 		return r;
2530 
2531 	return gfx_v12_1_xcc_cp_resume(adev, adev->gfx.xcc_mask);
2532 }
2533 
2534 static int gfx_v12_1_gfxhub_enable(struct amdgpu_device *adev)
2535 {
2536 	int r, i;
2537 	bool value;
2538 
2539 	r = adev->gfxhub.funcs->gart_enable(adev);
2540 	if (r)
2541 		return r;
2542 
2543 	value = (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS);
2545 
2546 	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
2547 	/* TODO investigate why TLB flush is needed,
2548 	 * are we missing a flush somewhere else? */
2549 	for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
2550 		if (AMDGPU_IS_GFXHUB(i))
2551 			adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(i), 0);
2552 	}
2553 
2554 	return 0;
2555 }
2556 
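/*
 * GB_ADDR_CONFIG_READ packs most layout parameters as log2 values;
 * expand them into plain counts and sizes for the rest of the driver.
 * A readback of zero is treated as an uninitialized config.
 */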
2557 static int get_gb_addr_config(struct amdgpu_device *adev)
2558 {
2559 	u32 gb_addr_config;
2560 
2561 	gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG_READ);
2562 	if (gb_addr_config == 0)
2563 		return -EINVAL;
2564 
2565 	adev->gfx.config.gb_addr_config_fields.num_pkrs =
2566 		1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG_READ, NUM_PKRS);
2567 
2568 	adev->gfx.config.gb_addr_config = gb_addr_config;
2569 
2570 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
2571 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2572 				      GB_ADDR_CONFIG_READ, NUM_PIPES);
2573 
2574 	adev->gfx.config.max_tile_pipes =
2575 		adev->gfx.config.gb_addr_config_fields.num_pipes;
2576 
2577 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
2578 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2579 				      GB_ADDR_CONFIG_READ, MAX_COMPRESSED_FRAGS);
2580 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
2581 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2582 				      GB_ADDR_CONFIG_READ, NUM_RB_PER_SE);
2583 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
2584 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2585 				      GB_ADDR_CONFIG_READ, NUM_SHADER_ENGINES);
2586 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
2587 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2588 				      GB_ADDR_CONFIG_READ, PIPE_INTERLEAVE_SIZE));
2589 
2590 	return 0;
2591 }
2592 
2593 static void gfx_v12_1_xcc_disable_gpa_mode(struct amdgpu_device *adev,
2594 					   int xcc_id)
2595 {
2596 	uint32_t data;
2597 
2598 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
2599 	data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
2600 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
2601 
2602 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPG_PSP_DEBUG);
2603 	data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK;
2604 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPG_PSP_DEBUG, data);
2605 }
2606 
2607 static void gfx_v12_1_xcc_enable_atomics(struct amdgpu_device *adev,
2608 					 int xcc_id)
2609 {
2610 	uint32_t data;
2611 
2612 	/* Set the TCP UTCL0 register to enable atomics */
2613 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_UTCL0_CNTL1);
2614 	data = REG_SET_FIELD(data, TCP_UTCL0_CNTL1, ATOMIC_REQUESTER_EN, 0x1);
2615 
2616 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_UTCL0_CNTL1, data);
2617 }
2618 
2619 static void gfx_v12_1_init_golden_registers(struct amdgpu_device *adev)
2620 {
2621 	int i;
2622 
2623 	for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); i++)
2624 		gfx_v12_1_xcc_enable_atomics(adev, i);
2625 }
2626 
2627 static int gfx_v12_1_hw_init(struct amdgpu_ip_block *ip_block)
2628 {
2629 	int r, i, num_xcc;
2630 	struct amdgpu_device *adev = ip_block->adev;
2631 
2632 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
2633 		/* rlc autoload firmware */
2634 		r = gfx_v12_1_rlc_backdoor_autoload_enable(adev);
2635 		if (r)
2636 			return r;
2637 	} else {
2638 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
2639 			num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2640 
2641 			if (adev->gfx.imu.funcs &&
2642 			    adev->gfx.imu.funcs->load_microcode)
2643 				adev->gfx.imu.funcs->load_microcode(adev);
2645 
2646 			for (i = 0; i < num_xcc; i++) {
2647 				/* disable gpa mode in backdoor loading */
2648 				gfx_v12_1_xcc_disable_gpa_mode(adev, i);
2649 			}
2650 		}
2651 	}
2652 
2653 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
2654 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2655 		r = gfx_v12_1_wait_for_rlc_autoload_complete(adev);
2656 		if (r) {
2657 			dev_err(adev->dev, "(%d) failed to wait for rlc autoload to complete\n", r);
2658 			return r;
2659 		}
2660 	}
2661 
2662 	adev->gfx.is_poweron = true;
2663 
2664 	if (get_gb_addr_config(adev))
2665 		DRM_WARN("Invalid gb_addr_config!\n");
2666 
2667 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2668 		gfx_v12_1_config_gfx_rs64(adev);
2669 
2670 	r = gfx_v12_1_gfxhub_enable(adev);
2671 	if (r)
2672 		return r;
2673 
2674 	gfx_v12_1_init_golden_registers(adev);
2675 
2676 	gfx_v12_1_constants_init(adev);
2677 
2678 	if (adev->nbio.funcs->gc_doorbell_init)
2679 		adev->nbio.funcs->gc_doorbell_init(adev);
2680 
2681 	r = gfx_v12_1_rlc_resume(adev);
2682 	if (r)
2683 		return r;
2684 
2685 	/*
2686 	 * init golden registers and rlc resume may override some registers,
2687 	 * reconfig them here
2688 	 */
2689 	gfx_v12_1_tcp_harvest(adev);
2690 
2691 	return gfx_v12_1_cp_resume(adev);
2696 }
2697 
2698 static void gfx_v12_1_xcc_fini(struct amdgpu_device *adev,
2699 			      int xcc_id)
2700 {
2701 	uint32_t tmp;
2702 
2703 	if (!adev->no_hw_access) {
2704 		if (amdgpu_gfx_disable_kcq(adev, xcc_id))
2705 			DRM_ERROR("KCQ disable failed\n");
2706 
2707 		amdgpu_mes_kiq_hw_fini(adev, xcc_id);
2708 	}
2709 
2710 	if (amdgpu_sriov_vf(adev)) {
2711 		/* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
2712 		tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
2713 		tmp &= 0xffffff00;
2714 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
2715 	}
2716 	gfx_v12_1_xcc_cp_compute_enable(adev, false, xcc_id);
2717 	gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
2718 }
2719 
2720 static int gfx_v12_1_hw_fini(struct amdgpu_ip_block *ip_block)
2721 {
2722 	struct amdgpu_device *adev = ip_block->adev;
2723 	int i, num_xcc;
2724 
2725 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
2726 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
2727 
2728 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2729 	for (i = 0; i < num_xcc; i++)
2730 		gfx_v12_1_xcc_fini(adev, i);
2732 
2733 	adev->gfxhub.funcs->gart_disable(adev);
2734 
2735 	adev->gfx.is_poweron = false;
2736 
2737 	return 0;
2738 }
2739 
2740 static int gfx_v12_1_suspend(struct amdgpu_ip_block *ip_block)
2741 {
2742 	return gfx_v12_1_hw_fini(ip_block);
2743 }
2744 
2745 static int gfx_v12_1_resume(struct amdgpu_ip_block *ip_block)
2746 {
2747 	return gfx_v12_1_hw_init(ip_block);
2748 }
2749 
2750 static bool gfx_v12_1_is_idle(struct amdgpu_ip_block *ip_block)
2751 {
2752 	struct amdgpu_device *adev = ip_block->adev;
2753 	int i, num_xcc;
2754 
2755 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2756 	for (i = 0; i < num_xcc; i++) {
2757 		if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i),
2758 				regGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE))
2759 			return false;
2760 	}
2761 	return true;
2762 }
2763 
2764 static int gfx_v12_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
2765 {
2766 	unsigned i;
2767 	struct amdgpu_device *adev = ip_block->adev;
2768 
2769 	for (i = 0; i < adev->usec_timeout; i++) {
2770 		if (gfx_v12_1_is_idle(ip_block))
2771 			return 0;
2772 		udelay(1);
2773 	}
2774 	return -ETIMEDOUT;
2775 }
2776 
2777 static uint64_t gfx_v12_1_get_gpu_clock_counter(struct amdgpu_device *adev)
2778 {
2779 	uint64_t clock = 0;
2780 
2781 	if (adev->smuio.funcs &&
2782 	    adev->smuio.funcs->get_gpu_clock_counter)
2783 		clock = adev->smuio.funcs->get_gpu_clock_counter(adev);
2784 	else
2785 		dev_warn(adev->dev, "querying the gpu clock counter is not supported\n");
2786 
2787 	return clock;
2788 }
2789 
2790 static int gfx_v12_1_early_init(struct amdgpu_ip_block *ip_block)
2791 {
2792 	struct amdgpu_device *adev = ip_block->adev;
2793 
2794 	adev->gfx.funcs = &gfx_v12_1_gfx_funcs;
2795 
2796 	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
2797 					  AMDGPU_MAX_COMPUTE_RINGS);
2798 
2799 	gfx_v12_1_set_kiq_pm4_funcs(adev);
2800 	gfx_v12_1_set_ring_funcs(adev);
2801 	gfx_v12_1_set_irq_funcs(adev);
2802 	gfx_v12_1_set_rlc_funcs(adev);
2803 	gfx_v12_1_set_mqd_funcs(adev);
2804 	gfx_v12_1_set_imu_funcs(adev);
2805 
2806 	gfx_v12_1_init_rlcg_reg_access_ctrl(adev);
2807 
2808 	return gfx_v12_1_init_microcode(adev);
2809 }
2810 
2811 static int gfx_v12_1_late_init(struct amdgpu_ip_block *ip_block)
2812 {
2813 	struct amdgpu_device *adev = ip_block->adev;
2814 	int r;
2815 
2816 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
2817 	if (r)
2818 		return r;
2819 
2820 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
2821 	if (r)
2822 		return r;
2823 
2824 	return 0;
2825 }
2826 
2827 static bool gfx_v12_1_is_rlc_enabled(struct amdgpu_device *adev)
2828 {
2829 	uint32_t rlc_cntl;
2830 
2831 	/* if RLC is not enabled, do nothing */
2832 	rlc_cntl = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
2833 	return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false;
2834 }
2835 
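/*
 * Request RLC safe mode and spin until the RLC acknowledges by
 * clearing the CMD field.  The unset path below issues the plain CMD
 * without the message bit and does not wait.
 */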
2836 static void gfx_v12_1_xcc_set_safe_mode(struct amdgpu_device *adev,
2837 					int xcc_id)
2838 {
2839 	uint32_t data;
2840 	unsigned i;
2841 
2842 	data = RLC_SAFE_MODE__CMD_MASK;
2843 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
2844 
2845 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
2846 
2847 	/* wait for RLC_SAFE_MODE */
2848 	for (i = 0; i < adev->usec_timeout; i++) {
2849 		if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2850 						regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
2851 			break;
2852 		udelay(1);
2853 	}
2854 }
2855 
2856 static void gfx_v12_1_xcc_unset_safe_mode(struct amdgpu_device *adev,
2857 					  int xcc_id)
2858 {
2859 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2860 		     regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK);
2861 }
2862 
2863 static void gfx_v12_1_update_perf_clk(struct amdgpu_device *adev,
2864 				      bool enable)
2865 {
2866 	int i, num_xcc;
2867 
2868 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2869 	for (i = 0; i < num_xcc; i++)
2870 		gfx_v12_1_xcc_update_perf_clk(adev, enable, i);
2871 }
2872 
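/*
 * Select the VMID sampled by the streaming performance monitor.  In
 * SR-IOV one-VF mode the register is written without going through
 * the KIQ, and the update is additionally emitted on the ring so it
 * takes effect in submission order.
 */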
2873 static void gfx_v12_1_update_spm_vmid(struct amdgpu_device *adev,
2874 				      int xcc_id,
2875 				      struct amdgpu_ring *ring,
2876 				      unsigned vmid)
2877 {
2878 	u32 reg, data;
2879 
2880 	reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL);
2881 	if (amdgpu_sriov_is_pp_one_vf(adev))
2882 		data = RREG32_NO_KIQ(reg);
2883 	else
2884 		data = RREG32(reg);
2885 
2886 	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
2887 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
2888 
2889 	if (amdgpu_sriov_is_pp_one_vf(adev))
2890 		WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL, data);
2891 	else
2892 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL, data);
2893 
2894 	if (ring &&
2895 	    amdgpu_sriov_is_pp_one_vf(adev) &&
2896 	    (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
2897 	     ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE))
2898 		amdgpu_ring_emit_wreg(ring, reg, data);
2901 }
2902 
2903 static const struct amdgpu_rlc_funcs gfx_v12_1_rlc_funcs = {
2904 	.is_rlc_enabled = gfx_v12_1_is_rlc_enabled,
2905 	.set_safe_mode = gfx_v12_1_xcc_set_safe_mode,
2906 	.unset_safe_mode = gfx_v12_1_xcc_unset_safe_mode,
2907 	.init = gfx_v12_1_rlc_init,
2908 	.get_csb_size = gfx_v12_1_get_csb_size,
2909 	.get_csb_buffer = gfx_v12_1_get_csb_buffer,
2910 	.resume = gfx_v12_1_rlc_resume,
2911 	.stop = gfx_v12_1_rlc_stop,
2912 	.reset = gfx_v12_1_rlc_reset,
2913 	.start = gfx_v12_1_rlc_start,
2914 	.update_spm_vmid = gfx_v12_1_update_spm_vmid,
2915 };
2916 
2917 #if 0
2918 static void gfx_v12_cntl_power_gating(struct amdgpu_device *adev, bool enable)
2919 {
2920 	/* TODO */
2921 }
2922 
2923 static void gfx_v12_cntl_pg(struct amdgpu_device *adev, bool enable)
2924 {
2925 	/* TODO */
2926 }
2927 #endif
2928 
2929 static int gfx_v12_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
2930 					   enum amd_powergating_state state)
2931 {
2932 	struct amdgpu_device *adev = ip_block->adev;
2933 	bool enable = (state == AMD_PG_STATE_GATE);
2934 
2935 	if (amdgpu_sriov_vf(adev))
2936 		return 0;
2937 
2938 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2939 	case IP_VERSION(12, 1, 0):
2940 		amdgpu_gfx_off_ctrl(adev, enable);
2941 		break;
2942 	default:
2943 		break;
2944 	}
2945 
2946 	return 0;
2947 }
2948 
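/*
 * Coarse-grain clock gating: on enable, drop the RLC CGCG/CGLS
 * overrides, arm the CGCG FSM (idle threshold 0x36, compensation
 * delay 0xF), relax the RB wptr polling to the idle values and turn
 * the busy/idle interrupts on so the RLC can track state changes;
 * on disable, just clear the CGCG/CGLS enable bits.
 */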
2949 static void gfx_v12_1_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
2950 							   bool enable, int xcc_id)
2951 {
2952 	uint32_t def, data;
2953 
2954 	if (!(adev->cg_flags &
2955 	      (AMD_CG_SUPPORT_GFX_CGCG |
2956 	      AMD_CG_SUPPORT_GFX_CGLS |
2957 	      AMD_CG_SUPPORT_GFX_3D_CGCG |
2958 	      AMD_CG_SUPPORT_GFX_3D_CGLS)))
2959 		return;
2960 
2961 	if (enable) {
2962 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2963 					  regRLC_CGTT_MGCG_OVERRIDE);
2964 
2965 		/* unset CGCG override */
2966 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
2967 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
2968 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2969 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2970 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG ||
2971 		    adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
2972 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
2973 
2974 		/* update CGCG override bits */
2975 		if (def != data)
2976 			WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2977 				     regRLC_CGTT_MGCG_OVERRIDE, data);
2978 
2979 		/* enable cgcg FSM(0x0000363F) */
2980 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2981 
2982 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
2983 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK;
2984 			data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
2985 				 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
2986 		}
2987 
2988 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
2989 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK;
2990 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
2991 				 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
2992 		}
2993 
2994 		if (def != data)
2995 			WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2996 				     regRLC_CGCG_CGLS_CTRL, data);
2997 
2998 		/* set IDLE_POLL_COUNT(0x00900100) */
2999 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
3000 
3001 		data &= ~CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK;
3002 		data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
3003 		data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3004 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3005 
3006 		if (def != data)
3007 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
3008 
3009 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL);
3010 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
3011 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
3012 		data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
3013 		data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
3014 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL, data);
3015 	} else {
3016 		/* Program RLC_CGCG_CGLS_CTRL */
3017 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
3018 
3019 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
3020 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
3021 
3022 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3023 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3024 
3025 		if (def != data)
3026 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
3027 	}
3028 }
3029 
3030 static void gfx_v12_1_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev,
3031 							   bool enable, int xcc_id)
3032 {
3033 	uint32_t data, def;

3034 	if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)))
3035 		return;
3036 
3037 	/* It is disabled by HW by default */
3038 	if (enable) {
3039 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
3040 			/* 1 - RLC_CGTT_MGCG_OVERRIDE */
3041 			def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3042 
3043 			data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3044 				  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
3045 				  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
3046 
3047 			if (def != data)
3048 				WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3049 		}
3050 	} else {
3051 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
3052 			def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3053 
3054 			data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
3055 				 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3056 				 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
3057 
3058 			if (def != data)
3059 				WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3060 		}
3061 	}
3062 }
3063 
3064 static void gfx_v12_1_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
3065 					       bool enable, int xcc_id)
3066 {
3067 	uint32_t def, data;
3068 
3069 	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
3070 		return;
3071 
3072 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3073 
3074 	if (enable)
3075 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK |
3076 				  RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK);
3077 	else
3078 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK |
3079 				RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK;
3080 
3081 	if (def != data)
3082 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3083 }
3084 
3085 static void gfx_v12_1_xcc_update_sram_fgcg(struct amdgpu_device *adev,
3086 					   bool enable, int xcc_id)
3087 {
3088 	uint32_t def, data;
3089 
3090 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
3091 		return;
3092 
3093 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3094 
3095 	if (enable)
3096 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
3097 	else
3098 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
3099 
3100 	if (def != data)
3101 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3102 }
3103 
3104 static void gfx_v12_1_xcc_update_perf_clk(struct amdgpu_device *adev,
3105 					  bool enable, int xcc_id)
3106 {
3107 	uint32_t def, data;
3108 
3109 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK))
3110 		return;
3111 
3112 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3113 
3114 	if (enable)
3115 		data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
3116 	else
3117 		data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
3118 
3119 	if (def != data)
3120 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3121 }
3122 
3123 static int gfx_v12_1_xcc_update_gfx_clock_gating(struct amdgpu_device *adev,
3124 					     bool enable, int xcc_id)
3125 {
3126 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
3127 
3128 	gfx_v12_1_xcc_update_coarse_grain_clock_gating(adev, enable, xcc_id);
3129 
3130 	gfx_v12_1_xcc_update_medium_grain_clock_gating(adev, enable, xcc_id);
3131 
3132 	gfx_v12_1_xcc_update_repeater_fgcg(adev, enable, xcc_id);
3133 
3134 	gfx_v12_1_xcc_update_sram_fgcg(adev, enable, xcc_id);
3135 
3136 	gfx_v12_1_xcc_update_perf_clk(adev, enable, xcc_id);
3137 
3138 	if (adev->cg_flags &
3139 	    (AMD_CG_SUPPORT_GFX_MGCG |
3140 	     AMD_CG_SUPPORT_GFX_CGLS |
3141 	     AMD_CG_SUPPORT_GFX_CGCG |
3142 	     AMD_CG_SUPPORT_GFX_3D_CGCG |
3143 	     AMD_CG_SUPPORT_GFX_3D_CGLS))
3144 		gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, enable, xcc_id);
3145 
3146 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
3147 
3148 	return 0;
3149 }
3150 
3151 static int gfx_v12_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
3152 					   enum amd_clockgating_state state)
3153 {
3154 	struct amdgpu_device *adev = ip_block->adev;
3155 	int i, num_xcc;
3156 
3157 	if (amdgpu_sriov_vf(adev))
3158 		return 0;
3159 
3160 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3161 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
3162 	case IP_VERSION(12, 1, 0):
3163 		for (i = 0; i < num_xcc; i++)
3164 			gfx_v12_1_xcc_update_gfx_clock_gating(adev,
3165 				  state == AMD_CG_STATE_GATE, i);
3166 		break;
3167 	default:
3168 		break;
3169 	}
3170 
3171 	return 0;
3172 }
3173 
3174 static void gfx_v12_1_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
3175 {
3176 	struct amdgpu_device *adev = ip_block->adev;
3177 	int data;
3178 
3179 	/* AMD_CG_SUPPORT_GFX_MGCG */
3180 	data = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE);
3181 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
3182 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
3183 
3184 	/* AMD_CG_SUPPORT_REPEATER_FGCG */
3185 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK))
3186 		*flags |= AMD_CG_SUPPORT_REPEATER_FGCG;
3187 
3188 	/* AMD_CG_SUPPORT_GFX_FGCG */
3189 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK))
3190 		*flags |= AMD_CG_SUPPORT_GFX_FGCG;
3191 
3192 	/* AMD_CG_SUPPORT_GFX_PERF_CLK */
3193 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK))
3194 		*flags |= AMD_CG_SUPPORT_GFX_PERF_CLK;
3195 
3196 	/* AMD_CG_SUPPORT_GFX_CGCG */
3197 	data = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL);
3198 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
3199 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
3200 
3201 	/* AMD_CG_SUPPORT_GFX_CGLS */
3202 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
3203 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
3204 }
3205 
3206 static u64 gfx_v12_1_ring_get_rptr_compute(struct amdgpu_ring *ring)
3207 {
3208 	/* gfx12 hardware only keeps a 32-bit rptr */
3209 	return *(uint32_t *)ring->rptr_cpu_addr;
3210 }
3211 
3212 static u64 gfx_v12_1_ring_get_wptr_compute(struct amdgpu_ring *ring)
3213 {
3214 	u64 wptr;
3215 
3216 	/* XXX check if swapping is necessary on BE */
3217 	if (ring->use_doorbell)
3218 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
3219 	else
3220 		BUG();
3221 	return wptr;
3222 }
3223 
3224 static void gfx_v12_1_ring_set_wptr_compute(struct amdgpu_ring *ring)
3225 {
3226 	struct amdgpu_device *adev = ring->adev;
3227 
3228 	/* XXX check if swapping is necessary on BE */
3229 	if (ring->use_doorbell) {
3230 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
3231 			     ring->wptr);
3232 		WDOORBELL64(ring->doorbell_index, ring->wptr);
3233 	} else {
3234 		BUG(); /* only DOORBELL method supported on gfx12 now */
3235 	}
3236 }
3237 
3238 static void gfx_v12_1_ring_emit_ib_compute(struct amdgpu_ring *ring,
3239 					   struct amdgpu_job *job,
3240 					   struct amdgpu_ib *ib,
3241 					   uint32_t flags)
3242 {
3243 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
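	/*
	 * IB descriptor: length in dwords in the low bits, the VALID flag,
	 * and the VMID shifted into the high bits so the CP fetches the IB
	 * through the job's VM mapping.
	 */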
3244 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
3245 
3246 	/* Currently there is a high probability of a wave ID mismatch
3247 	 * between ME and GDS, leading to a hw deadlock, because ME generates
3248 	 * different wave IDs than the GDS expects. This situation happens
3249 	 * randomly when at least 5 compute pipes use GDS ordered append.
3250 	 * The wave IDs generated by ME are also wrong after suspend/resume.
3251 	 * Those are probably bugs somewhere else in the kernel driver.
3252 	 *
3253 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
3254 	 * GDS to 0 for this ring (me/pipe).
3255 	 */
3256 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
3257 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3258 		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
3259 	}
3260 
3261 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3262 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3263 	amdgpu_ring_write(ring,
3264 #ifdef __BIG_ENDIAN
3265 				(2 << 0) |
3266 #endif
3267 				lower_32_bits(ib->gpu_addr));
3268 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3269 	amdgpu_ring_write(ring, control);
3270 }
3271 
3272 static void gfx_v12_1_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
3273 				     u64 seq, unsigned flags)
3274 {
3275 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
3276 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
3277 
3278 	/* RELEASE_MEM - flush caches, send int */
3279 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
3280 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ(1) |
3281 				 PACKET3_RELEASE_MEM_GCR_GLV_WB |
3282 				 PACKET3_RELEASE_MEM_GCR_GL2_WB |
3283 				 PACKET3_RELEASE_MEM_GCR_GL2_SCOPE(2) |
3284 				 PACKET3_RELEASE_MEM_TEMPORAL(3) |
3285 				 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
3286 				 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
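	/* DATA_SEL: 2 writes the full 64-bit seq, 1 only the low 32 bits;
	 * INT_SEL 2 raises the interrupt once the write is confirmed.
	 */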
3287 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
3288 				 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));
3289 
3290 	/*
3291 	 * The address must be Qword aligned for a 64-bit write and Dword
3292 	 * aligned when only the low 32 bits are sent (data high discarded).
3293 	 */
3294 	if (write64bit)
3295 		BUG_ON(addr & 0x7);
3296 	else
3297 		BUG_ON(addr & 0x3);
3298 	amdgpu_ring_write(ring, lower_32_bits(addr));
3299 	amdgpu_ring_write(ring, upper_32_bits(addr));
3300 	amdgpu_ring_write(ring, lower_32_bits(seq));
3301 	amdgpu_ring_write(ring, upper_32_bits(seq));
3302 	amdgpu_ring_write(ring, 0);
3303 }
3304 
3305 static void gfx_v12_1_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
3306 {
3307 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3308 	uint32_t seq = ring->fence_drv.sync_seq;
3309 	uint64_t addr = ring->fence_drv.gpu_addr;
3310 
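	/*
	 * Stall the ring until the fence writeback catches up with the last
	 * emitted seq, i.e. all prior work on this ring has signalled; the
	 * arguments follow the wait_reg_mem convention of earlier gfx
	 * generations (memory poll against seq with a full dword mask).
	 */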
3311 	gfx_v12_1_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
3312 			       upper_32_bits(addr), seq, 0xffffffff, 4);
3313 }
3314 
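/*
 * Ask the CP to invalidate the VM TLB entries matching @pasid; dst_sel
 * selects the responding engine and all_hub extends the invalidation to
 * every VM hub.
 */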
3315 static void gfx_v12_1_ring_invalidate_tlbs(struct amdgpu_ring *ring,
3316 				   uint16_t pasid, uint32_t flush_type,
3317 				   bool all_hub, uint8_t dst_sel)
3318 {
3319 	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
3320 	amdgpu_ring_write(ring,
3321 			  PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) |
3322 			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
3323 			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
3324 			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
3325 }
3326 
3327 static void gfx_v12_1_ring_emit_vm_flush(struct amdgpu_ring *ring,
3328 					 unsigned vmid, uint64_t pd_addr)
3329 {
3330 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
3331 
3332 	/* compute doesn't have PFP */
3333 	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
3334 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
3335 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3336 		amdgpu_ring_write(ring, 0x0);
3337 	}
3338 }
3339 
3340 static void gfx_v12_1_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
3341 					  u64 seq, unsigned int flags)
3342 {
3343 	struct amdgpu_device *adev = ring->adev;
3344 
3345 	/* we only allocate 32 bits of writeback space for each fence seq */
3346 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
3347 
3348 	/* write fence seq to the "addr" */
3349 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3350 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3351 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
3352 	amdgpu_ring_write(ring, lower_32_bits(addr));
3353 	amdgpu_ring_write(ring, upper_32_bits(addr));
3354 	amdgpu_ring_write(ring, lower_32_bits(seq));
3355 
3356 	if (flags & AMDGPU_FENCE_FLAG_INT) {
3357 		/* set register to trigger INT */
3358 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3359 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3360 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
3361 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS));
3362 		amdgpu_ring_write(ring, 0);
3363 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
3364 	}
3365 }
3366 
3367 static void gfx_v12_1_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
3368 				     uint32_t reg_val_offs)
3369 {
3370 	struct amdgpu_device *adev = ring->adev;
3371 
3372 	reg = gfx_v12_1_normalize_xcc_reg_offset(reg);
3373 
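	/* COPY_DATA the register into the caller's writeback slot; the
	 * value is then read back through adev->wb.
	 */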
3374 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
3375 	amdgpu_ring_write(ring, 0 |	/* src: register */
3376 				(5 << 8) |	/* dst: memory */
3377 				(1 << 20));	/* write confirm */
3378 	amdgpu_ring_write(ring, reg);
3379 	amdgpu_ring_write(ring, 0);
3380 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
3381 				reg_val_offs * 4));
3382 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
3383 				reg_val_offs * 4));
3384 }
3385 
3386 static void gfx_v12_1_ring_emit_wreg(struct amdgpu_ring *ring,
3387 				     uint32_t reg,
3388 				     uint32_t val)
3389 {
3390 	uint32_t cmd = 0;
3391 
3392 	reg = gfx_v12_1_normalize_xcc_reg_offset(reg);
3393 
3394 	switch (ring->funcs->type) {
3395 	case AMDGPU_RING_TYPE_KIQ:
3396 		cmd = (1 << 16); /* no inc addr */
3397 		break;
3398 	default:
3399 		cmd = WR_CONFIRM;
3400 		break;
3401 	}
3402 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3403 	amdgpu_ring_write(ring, cmd);
3404 	amdgpu_ring_write(ring, reg);
3405 	amdgpu_ring_write(ring, 0);
3406 	amdgpu_ring_write(ring, val);
3407 }
3408 
3409 static void gfx_v12_1_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
3410 					uint32_t val, uint32_t mask)
3411 {
3412 	gfx_v12_1_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
3413 }
3414 
3415 static void gfx_v12_1_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
3416 						   uint32_t reg0, uint32_t reg1,
3417 						   uint32_t ref, uint32_t mask)
3418 {
3419 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3420 
3421 	gfx_v12_1_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
3422 			       ref, mask, 0x20);
3423 }
3424 
3425 static void gfx_v12_1_xcc_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
3426 							int me, int pipe,
3427 							enum amdgpu_interrupt_state state,
3428 							int xcc_id)
3429 {
3430 	u32 mec_int_cntl, mec_int_cntl_reg;
3431 
3432 	/*
3433 	 * amdgpu controls only the first MEC. That's why this function only
3434 	 * handles the setting of interrupts for this specific MEC. All other
3435 	 * pipes' interrupts are set by amdkfd.
3436 	 */
3437 
3438 	if (me == 1) {
3439 		switch (pipe) {
3440 		case 0:
3441 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3442 					GC, GET_INST(GC, xcc_id),
3443 					regCP_ME1_PIPE0_INT_CNTL);
3444 			break;
3445 		case 1:
3446 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3447 					GC, GET_INST(GC, xcc_id),
3448 					regCP_ME1_PIPE1_INT_CNTL);
3449 			break;
3450 		case 2:
3451 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3452 					GC, GET_INST(GC, xcc_id),
3453 					regCP_ME1_PIPE2_INT_CNTL);
3454 			break;
3455 		case 3:
3456 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3457 					GC, GET_INST(GC, xcc_id),
3458 					regCP_ME1_PIPE3_INT_CNTL);
3459 			break;
3460 		default:
3461 			DRM_DEBUG("invalid pipe %d\n", pipe);
3462 			return;
3463 		}
3464 	} else {
3465 		DRM_DEBUG("invalid me %d\n", me);
3466 		return;
3467 	}
3468 
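	/*
	 * The CP_ME1_PIPEn_INT_CNTL registers share one layout, so pipe0's
	 * field definitions are valid for whichever pipe was selected above.
	 */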
3469 	switch (state) {
3470 	case AMDGPU_IRQ_STATE_DISABLE:
3471 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3472 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3473 					     TIME_STAMP_INT_ENABLE, 0);
3474 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3475 					     GENERIC0_INT_ENABLE, 0);
3476 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3477 		break;
3478 	case AMDGPU_IRQ_STATE_ENABLE:
3479 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3480 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3481 					     TIME_STAMP_INT_ENABLE, 1);
3482 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3483 					     GENERIC0_INT_ENABLE, 1);
3484 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3485 		break;
3486 	default:
3487 		break;
3488 	}
3489 }
3490 
3491 static int gfx_v12_1_set_eop_interrupt_state(struct amdgpu_device *adev,
3492 					    struct amdgpu_irq_src *src,
3493 					    unsigned type,
3494 					    enum amdgpu_interrupt_state state)
3495 {
3496 	int i, num_xcc;
3497 
3498 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3499 	for (i = 0; i < num_xcc; i++) {
3500 		switch (type) {
3501 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
3502 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3503 					adev, 1, 0, state, i);
3504 			break;
3505 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
3506 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3507 					adev, 1, 1, state, i);
3508 			break;
3509 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
3510 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3511 					adev, 1, 2, state, i);
3512 			break;
3513 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
3514 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3515 					adev, 1, 3, state, i);
3516 			break;
3517 		default:
3518 			break;
3519 		}
3520 	}
3521 
3522 	return 0;
3523 }
3524 
3525 static int gfx_v12_1_eop_irq(struct amdgpu_device *adev,
3526 			     struct amdgpu_irq_src *source,
3527 			     struct amdgpu_iv_entry *entry)
3528 {
3529 	int i, xcc_id;
3530 	u8 me_id, pipe_id, queue_id;
3531 	struct amdgpu_ring *ring;
3532 	uint32_t mes_queue_id = entry->src_data[0];
3533 
3534 	DRM_DEBUG("IH: CP EOP\n");
3535 
3536 	if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
3537 		struct amdgpu_mes_queue *queue;
3538 
3539 		mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
3540 
3541 		spin_lock(&adev->mes.queue_id_lock);
3542 		queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
3543 		if (queue) {
3544 			DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
3545 			amdgpu_fence_process(queue->ring);
3546 		}
3547 		spin_unlock(&adev->mes.queue_id_lock);
3548 	} else {
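		/* ring_id encoding: pipe in bits [1:0], me in [3:2], queue in [6:4] */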
3549 		me_id = (entry->ring_id & 0x0c) >> 2;
3550 		pipe_id = (entry->ring_id & 0x03) >> 0;
3551 		queue_id = (entry->ring_id & 0x70) >> 4;
3552 		xcc_id = gfx_v12_1_ih_to_xcc_inst(adev, entry->node_id);
3553 
3554 		if (xcc_id == -EINVAL)
3555 			return -EINVAL;
3556 
3557 		switch (me_id) {
3558 		case 0:
3559 			if (pipe_id == 0)
3560 				amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
3561 			else
3562 				amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
3563 			break;
3564 		case 1:
3565 		case 2:
3566 			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3567 				ring = &adev->gfx.compute_ring
3568 						[i +
3569 						 xcc_id * adev->gfx.num_compute_rings];
3570 				/* Per-queue interrupt is supported for MEC starting from VI.
3571 				 * The interrupt can only be enabled/disabled per pipe instead
3572 				 * of per queue.
3573 				 */
3574 				if ((ring->me == me_id) &&
3575 				    (ring->pipe == pipe_id) &&
3576 				    (ring->queue == queue_id))
3577 					amdgpu_fence_process(ring);
3578 			}
3579 			break;
3580 		}
3581 	}
3582 
3583 	return 0;
3584 }
3585 
3586 static int gfx_v12_1_set_priv_reg_fault_state(struct amdgpu_device *adev,
3587 					      struct amdgpu_irq_src *source,
3588 					      unsigned type,
3589 					      enum amdgpu_interrupt_state state)
3590 {
3591 	int i, num_xcc;
3592 
3593 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3594 	switch (state) {
3595 	case AMDGPU_IRQ_STATE_DISABLE:
3596 	case AMDGPU_IRQ_STATE_ENABLE:
3597 		for (i = 0; i < num_xcc; i++)
3598 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3599 					      PRIV_REG_INT_ENABLE,
3600 					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3601 		break;
3602 	default:
3603 		break;
3604 	}
3605 
3606 	return 0;
3607 }
3608 
3609 static int gfx_v12_1_set_priv_inst_fault_state(struct amdgpu_device *adev,
3610 					       struct amdgpu_irq_src *source,
3611 					       unsigned type,
3612 					       enum amdgpu_interrupt_state state)
3613 {
3614 	int i, num_xcc;
3615 
3616 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3617 	switch (state) {
3618 	case AMDGPU_IRQ_STATE_DISABLE:
3619 	case AMDGPU_IRQ_STATE_ENABLE:
3620 		for (i = 0; i < num_xcc; i++)
3621 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3622 					      PRIV_INSTR_INT_ENABLE,
3623 					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3624 		break;
3625 	default:
3626 		break;
3627 	}
3628 
3629 	return 0;
3630 }
3631 
3632 static void gfx_v12_1_handle_priv_fault(struct amdgpu_device *adev,
3633 					struct amdgpu_iv_entry *entry)
3634 {
3635 	u8 me_id, pipe_id, queue_id;
3636 	struct amdgpu_ring *ring;
3637 	int i, xcc_id;
3638 
3639 	me_id = (entry->ring_id & 0x0c) >> 2;
3640 	pipe_id = (entry->ring_id & 0x03) >> 0;
3641 	queue_id = (entry->ring_id & 0x70) >> 4;
3642 	xcc_id = gfx_v12_1_ih_to_xcc_inst(adev, entry->node_id);
3643 
3644 	if (xcc_id == -EINVAL)
3645 		return;
3646 
3647 	switch (me_id) {
3648 	case 0:
3649 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3650 			ring = &adev->gfx.gfx_ring[i];
3651 			/* we only enable 1 gfx queue per pipe for now */
3652 			if (ring->me == me_id && ring->pipe == pipe_id)
3653 				drm_sched_fault(&ring->sched);
3654 		}
3655 		break;
3656 	case 1:
3657 	case 2:
3658 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3659 			ring = &adev->gfx.compute_ring
3660 					[i +
3661 					 xcc_id * adev->gfx.num_compute_rings];
3662 			if (ring->me == me_id && ring->pipe == pipe_id &&
3663 			    ring->queue == queue_id)
3664 				drm_sched_fault(&ring->sched);
3665 		}
3666 		break;
3667 	default:
3668 		BUG();
3669 		break;
3670 	}
3671 }
3672 
3673 static int gfx_v12_1_priv_reg_irq(struct amdgpu_device *adev,
3674 				  struct amdgpu_irq_src *source,
3675 				  struct amdgpu_iv_entry *entry)
3676 {
3677 	DRM_ERROR("Illegal register access in command stream\n");
3678 	gfx_v12_1_handle_priv_fault(adev, entry);
3679 	return 0;
3680 }
3681 
3682 static int gfx_v12_1_priv_inst_irq(struct amdgpu_device *adev,
3683 				   struct amdgpu_irq_src *source,
3684 				   struct amdgpu_iv_entry *entry)
3685 {
3686 	DRM_ERROR("Illegal instruction in command stream\n");
3687 	gfx_v12_1_handle_priv_fault(adev, entry);
3688 	return 0;
3689 }
3690 
3691 static void gfx_v12_1_emit_mem_sync(struct amdgpu_ring *ring)
3692 {
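	/* Write back and invalidate GL2 and invalidate the L0 vector (GLV),
	 * scalar (GLK) and instruction (GLI) caches so subsequent work
	 * observes memory coherently.
	 */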
3693 	const unsigned int gcr_cntl =
3694 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
3695 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
3696 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
3697 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
3698 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1) |
3699 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_SCOPE(2);
3700 
3701 	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
3702 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
3703 	amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
3704 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
3705 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
3706 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
3707 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
3708 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
3709 	amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
3710 }
3711 
3712 static const struct amd_ip_funcs gfx_v12_1_ip_funcs = {
3713 	.name = "gfx_v12_1",
3714 	.early_init = gfx_v12_1_early_init,
3715 	.late_init = gfx_v12_1_late_init,
3716 	.sw_init = gfx_v12_1_sw_init,
3717 	.sw_fini = gfx_v12_1_sw_fini,
3718 	.hw_init = gfx_v12_1_hw_init,
3719 	.hw_fini = gfx_v12_1_hw_fini,
3720 	.suspend = gfx_v12_1_suspend,
3721 	.resume = gfx_v12_1_resume,
3722 	.is_idle = gfx_v12_1_is_idle,
3723 	.wait_for_idle = gfx_v12_1_wait_for_idle,
3724 	.set_clockgating_state = gfx_v12_1_set_clockgating_state,
3725 	.set_powergating_state = gfx_v12_1_set_powergating_state,
3726 	.get_clockgating_state = gfx_v12_1_get_clockgating_state,
3727 };
3728 
3729 static const struct amdgpu_ring_funcs gfx_v12_1_ring_funcs_compute = {
3730 	.type = AMDGPU_RING_TYPE_COMPUTE,
3731 	.align_mask = 0xff,
3732 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
3733 	.support_64bit_ptrs = true,
3734 	.get_rptr = gfx_v12_1_ring_get_rptr_compute,
3735 	.get_wptr = gfx_v12_1_ring_get_wptr_compute,
3736 	.set_wptr = gfx_v12_1_ring_set_wptr_compute,
3737 	.emit_frame_size =
3738 		7 + /* gfx_v12_1_ring_emit_pipeline_sync */
3739 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
3740 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
3741 		2 + /* gfx_v12_1_ring_emit_vm_flush */
3742 		8 + 8 + 8 + /* gfx_v12_1_ring_emit_fence x3 for user fence, vm fence */
3743 		8, /* gfx_v12_1_emit_mem_sync */
3744 	.emit_ib_size =	7, /* gfx_v12_1_ring_emit_ib_compute */
3745 	.emit_ib = gfx_v12_1_ring_emit_ib_compute,
3746 	.emit_fence = gfx_v12_1_ring_emit_fence,
3747 	.emit_pipeline_sync = gfx_v12_1_ring_emit_pipeline_sync,
3748 	.emit_vm_flush = gfx_v12_1_ring_emit_vm_flush,
3749 	.test_ring = gfx_v12_1_ring_test_ring,
3750 	.test_ib = gfx_v12_1_ring_test_ib,
3751 	.insert_nop = amdgpu_ring_insert_nop,
3752 	.pad_ib = amdgpu_ring_generic_pad_ib,
3753 	.emit_wreg = gfx_v12_1_ring_emit_wreg,
3754 	.emit_reg_wait = gfx_v12_1_ring_emit_reg_wait,
3755 	.emit_reg_write_reg_wait = gfx_v12_1_ring_emit_reg_write_reg_wait,
3756 	.emit_mem_sync = gfx_v12_1_emit_mem_sync,
3757 };
3758 
3759 static const struct amdgpu_ring_funcs gfx_v12_1_ring_funcs_kiq = {
3760 	.type = AMDGPU_RING_TYPE_KIQ,
3761 	.align_mask = 0xff,
3762 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
3763 	.support_64bit_ptrs = true,
3764 	.get_rptr = gfx_v12_1_ring_get_rptr_compute,
3765 	.get_wptr = gfx_v12_1_ring_get_wptr_compute,
3766 	.set_wptr = gfx_v12_1_ring_set_wptr_compute,
3767 	.emit_frame_size =
3768 		7 + /* gfx_v12_1_ring_emit_pipeline_sync */
3769 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
3770 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
3771 		2 + /* gfx_v12_1_ring_emit_vm_flush */
3772 		8 + 8 + 8, /* gfx_v12_1_ring_emit_fence_kiq x3 for user fence, vm fence */
3773 	.emit_ib_size =	7, /* gfx_v12_1_ring_emit_ib_compute */
3774 	.emit_ib = gfx_v12_1_ring_emit_ib_compute,
3775 	.emit_fence = gfx_v12_1_ring_emit_fence_kiq,
3776 	.test_ring = gfx_v12_1_ring_test_ring,
3777 	.test_ib = gfx_v12_1_ring_test_ib,
3778 	.insert_nop = amdgpu_ring_insert_nop,
3779 	.pad_ib = amdgpu_ring_generic_pad_ib,
3780 	.emit_rreg = gfx_v12_1_ring_emit_rreg,
3781 	.emit_wreg = gfx_v12_1_ring_emit_wreg,
3782 	.emit_reg_wait = gfx_v12_1_ring_emit_reg_wait,
3783 	.emit_reg_write_reg_wait = gfx_v12_1_ring_emit_reg_write_reg_wait,
3784 };
3785 
3786 static void gfx_v12_1_set_ring_funcs(struct amdgpu_device *adev)
3787 {
3788 	int i, j, num_xcc;
3789 
3790 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3791 	for (i = 0; i < num_xcc; i++) {
3792 		adev->gfx.kiq[i].ring.funcs = &gfx_v12_1_ring_funcs_kiq;
3793 
3794 		for (j = 0; j < adev->gfx.num_compute_rings; j++)
3795 			adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs =
3796 						&gfx_v12_1_ring_funcs_compute;
3797 	}
3798 }
3799 
3800 static const struct amdgpu_irq_src_funcs gfx_v12_1_eop_irq_funcs = {
3801 	.set = gfx_v12_1_set_eop_interrupt_state,
3802 	.process = gfx_v12_1_eop_irq,
3803 };
3804 
3805 static const struct amdgpu_irq_src_funcs gfx_v12_1_priv_reg_irq_funcs = {
3806 	.set = gfx_v12_1_set_priv_reg_fault_state,
3807 	.process = gfx_v12_1_priv_reg_irq,
3808 };
3809 
3810 static const struct amdgpu_irq_src_funcs gfx_v12_1_priv_inst_irq_funcs = {
3811 	.set = gfx_v12_1_set_priv_inst_fault_state,
3812 	.process = gfx_v12_1_priv_inst_irq,
3813 };
3814 
3815 static void gfx_v12_1_set_irq_funcs(struct amdgpu_device *adev)
3816 {
3817 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
3818 	adev->gfx.eop_irq.funcs = &gfx_v12_1_eop_irq_funcs;
3819 
3820 	adev->gfx.priv_reg_irq.num_types = 1;
3821 	adev->gfx.priv_reg_irq.funcs = &gfx_v12_1_priv_reg_irq_funcs;
3822 
3823 	adev->gfx.priv_inst_irq.num_types = 1;
3824 	adev->gfx.priv_inst_irq.funcs = &gfx_v12_1_priv_inst_irq_funcs;
3825 }
3826 
3827 static void gfx_v12_1_set_imu_funcs(struct amdgpu_device *adev)
3828 {
3829 	if (adev->flags & AMD_IS_APU)
3830 		adev->gfx.imu.mode = MISSION_MODE;
3831 	else
3832 		adev->gfx.imu.mode = DEBUG_MODE;
3833 
3834 	adev->gfx.imu.funcs = &gfx_v12_1_imu_funcs;
3835 }
3836 
3837 static void gfx_v12_1_set_rlc_funcs(struct amdgpu_device *adev)
3838 {
3839 	adev->gfx.rlc.funcs = &gfx_v12_1_rlc_funcs;
3840 }
3841 
3842 static void gfx_v12_1_set_mqd_funcs(struct amdgpu_device *adev)
3843 {
3844 	/* set compute eng mqd */
3845 	adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
3846 		sizeof(struct v12_1_compute_mqd);
3847 	adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
3848 		gfx_v12_1_compute_mqd_init;
3849 }
3850 
3851 static void gfx_v12_1_set_user_cu_inactive_bitmap_per_sh(struct amdgpu_device *adev,
3852 							  u32 bitmap, int xcc_id)
3853 {
3854 	u32 data;
3855 
3856 	if (!bitmap)
3857 		return;
3858 
3859 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
3860 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
3861 
3862 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
3863 }
3864 
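/*
 * The CC/GC_USER SHADER_ARRAY_CONFIG registers track *inactive* units:
 * OR the hardware and user masks together, then invert against the
 * per-SH CU mask to obtain the active bitmap.
 */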
3865 static u32 gfx_v12_1_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev,
3866 						 int xcc_id)
3867 {
3868 	u32 data, mask;
3869 
3870 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
3871 	data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);
3872 
3873 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
3874 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
3875 
3876 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
3877 
3878 	return (~data) & mask;
3879 }
3880 
3881 static int gfx_v12_1_get_cu_info(struct amdgpu_device *adev,
3882 				 struct amdgpu_cu_info *cu_info)
3883 {
3884 	int i, j, k, counter, xcc_id, active_cu_number = 0;
3885 	u32 mask, bitmap;
3886 	unsigned int disable_masks[2 * 2];
3887 
3888 	if (!adev || !cu_info)
3889 		return -EINVAL;
3890 
3891 	if (adev->gfx.config.max_shader_engines > 2 ||
3892 	    adev->gfx.config.max_sh_per_se > 2) {
3893 		dev_err(adev->dev,
3894 			"Max SE (%d) or Max SA per SE (%d) exceeds the expected limit\n",
3895 			adev->gfx.config.max_shader_engines,
3896 			adev->gfx.config.max_sh_per_se);
3897 		return -EINVAL;
3898 	}
3899 
3900 	amdgpu_gfx_parse_disable_cu(disable_masks,
3901 				    adev->gfx.config.max_shader_engines,
3902 				    adev->gfx.config.max_sh_per_se);
3903 
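	/*
	 * Walk every SA of every XCC under grbm_idx_mutex: select the SA,
	 * apply the user CU-disable mask and read back which CUs are
	 * actually active.
	 */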
3904 	mutex_lock(&adev->grbm_idx_mutex);
3905 	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
3906 		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3907 			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3908 				bitmap = i * adev->gfx.config.max_sh_per_se + j;
3909 				if (!((gfx_v12_1_get_sa_active_bitmap(adev, xcc_id) >> bitmap) & 1))
3910 					continue;
3911 				mask = 1;
3912 				counter = 0;
3913 				gfx_v12_1_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
3914 				gfx_v12_1_set_user_cu_inactive_bitmap_per_sh(
3915 					adev,
3916 					disable_masks[i * adev->gfx.config.max_sh_per_se + j],
3917 					xcc_id);
3918 				bitmap = gfx_v12_1_get_cu_active_bitmap_per_sh(adev, xcc_id);
3919 
3920 				cu_info->bitmap[xcc_id][i][j] = bitmap;
3921 
3922 				for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
3923 					if (bitmap & mask)
3924 						counter++;
3925 
3926 					mask <<= 1;
3927 				}
3928 				active_cu_number += counter;
3929 			}
3930 		}
3931 		gfx_v12_1_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, xcc_id);
3932 	}
3933 	mutex_unlock(&adev->grbm_idx_mutex);
3934 
3935 	cu_info->number = active_cu_number;
3936 	cu_info->simd_per_cu = NUM_SIMD_PER_CU_GFX12_1;
3937 	cu_info->lds_size = 320;
3938 
3939 	return 0;
3940 }
3941 
3942 const struct amdgpu_ip_block_version gfx_v12_1_ip_block = {
3943 	.type = AMD_IP_BLOCK_TYPE_GFX,
3944 	.major = 12,
3945 	.minor = 1,
3946 	.rev = 0,
3947 	.funcs = &gfx_v12_1_ip_funcs,
3948 };
3949 
3950 static int gfx_v12_1_xcp_resume(void *handle, uint32_t inst_mask)
3951 {
3952 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3953 	uint32_t tmp_mask;
3954 	int i, r;
3955 
3956 	/* TODO : Initialize golden regs */
3957 	/* gfx_v12_1_init_golden_registers(adev); */
3958 
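	/*
	 * Bring up only the XCCs named in inst_mask: per-XCC constants
	 * first, then the RLC (skipped under SR-IOV where the host owns RLC
	 * bring-up) and finally the CP.
	 */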
3959 	tmp_mask = inst_mask;
3960 	for_each_inst(i, tmp_mask)
3961 		gfx_v12_1_xcc_constants_init(adev, i);
3962 
3963 	if (!amdgpu_sriov_vf(adev)) {
3964 		tmp_mask = inst_mask;
3965 		for_each_inst(i, tmp_mask) {
3966 			r = gfx_v12_1_xcc_rlc_resume(adev, i);
3967 			if (r)
3968 				return r;
3969 		}
3970 	}
3971 
3972 	r = gfx_v12_1_xcc_cp_resume(adev, inst_mask);
3973 
3974 	return r;
3975 }
3976 
3977 static int gfx_v12_1_xcp_suspend(void *handle, uint32_t inst_mask)
3978 {
3979 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3980 	int i;
3981 
3982 	for_each_inst(i, inst_mask)
3983 		gfx_v12_1_xcc_fini(adev, i);
3984 
3985 	return 0;
3986 }
3987 
3988 struct amdgpu_xcp_ip_funcs gfx_v12_1_xcp_funcs = {
3989 	.suspend = &gfx_v12_1_xcp_suspend,
3990 	.resume = &gfx_v12_1_xcp_resume
3991 };
3992