xref: /linux/drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c (revision 8b971ce0cbc71a10f1d19d2bb6f3dc5c6f07d9d9)
1 /*
2  * Copyright 2025 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/delay.h>
24 #include <linux/kernel.h>
25 #include <linux/firmware.h>
26 #include <linux/module.h>
27 #include <linux/pci.h>
28 #include "amdgpu.h"
29 #include "amdgpu_gfx.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_smu.h"
32 #include "amdgpu_atomfirmware.h"
33 #include "imu_v12_0.h"
34 #include "soc_v1_0.h"
35 #include "gfx_v12_1_pkt.h"
36 
37 #include "gc/gc_12_1_0_offset.h"
38 #include "gc/gc_12_1_0_sh_mask.h"
39 #include "soc24_enum.h"
40 #include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
41 
42 #include "soc15.h"
43 #include "clearstate_gfx12.h"
44 #include "v12_structs.h"
45 #include "gfx_v12_1.h"
46 #include "mes_v12_1.h"
47 
48 #define GFX12_MEC_HPD_SIZE	2048
49 
50 #define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
51 
52 MODULE_FIRMWARE("amdgpu/gc_12_1_0_mec.bin");
53 MODULE_FIRMWARE("amdgpu/gc_12_1_0_rlc.bin");
54 
55 #define SH_MEM_ALIGNMENT_MODE_UNALIGNED_GFX12_1_0	0x00000001
56 #define DEFAULT_SH_MEM_CONFIG \
57 	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
58 	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED_GFX12_1_0 << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
59 	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
60 
61 static void gfx_v12_1_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id);
62 static void gfx_v12_1_set_ring_funcs(struct amdgpu_device *adev);
63 static void gfx_v12_1_set_irq_funcs(struct amdgpu_device *adev);
64 static void gfx_v12_1_set_rlc_funcs(struct amdgpu_device *adev);
65 static void gfx_v12_1_set_mqd_funcs(struct amdgpu_device *adev);
66 static void gfx_v12_1_set_imu_funcs(struct amdgpu_device *adev);
67 static int gfx_v12_1_get_cu_info(struct amdgpu_device *adev,
68 				 struct amdgpu_cu_info *cu_info);
69 static uint64_t gfx_v12_1_get_gpu_clock_counter(struct amdgpu_device *adev);
70 static void gfx_v12_1_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
71 				       u32 sh_num, u32 instance, int xcc_id);
72 static u32 gfx_v12_1_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev,
73 						  int xcc_id);
74 
75 static void gfx_v12_1_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
76 				     uint32_t val);
77 static int gfx_v12_1_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
78 static void gfx_v12_1_ring_invalidate_tlbs(struct amdgpu_ring *ring,
79 					   uint16_t pasid, uint32_t flush_type,
80 					   bool all_hub, uint8_t dst_sel);
81 static void gfx_v12_1_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
82 static void gfx_v12_1_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
83 static void gfx_v12_1_update_perf_clk(struct amdgpu_device *adev,
84 				      bool enable);
85 static void gfx_v12_1_xcc_update_perf_clk(struct amdgpu_device *adev,
86 					 bool enable, int xcc_id);
87 
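/*
 * KIQ PM4 helpers: each function below emits a single PM4 packet on the
 * KIQ ring.  The number of dwords written (header plus payload) has to
 * match the corresponding *_size field in gfx_v12_1_kiq_pm4_funcs so that
 * callers can reserve the right amount of ring space.
 */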
88 static void gfx_v12_1_kiq_set_resources(struct amdgpu_ring *kiq_ring,
89 					uint64_t queue_mask)
90 {
91 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
92 	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
93 			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
94 	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
95 	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
96 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
97 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
98 	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
99 	amdgpu_ring_write(kiq_ring, 0);
100 }
101 
102 static void gfx_v12_1_kiq_map_queues(struct amdgpu_ring *kiq_ring,
103 				     struct amdgpu_ring *ring)
104 {
105 	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
106 	uint64_t wptr_addr = ring->wptr_gpu_addr;
107 	uint32_t me = 0, eng_sel = 0;
108 
109 	switch (ring->funcs->type) {
110 	case AMDGPU_RING_TYPE_COMPUTE:
111 		me = 1;
112 		eng_sel = 0;
113 		break;
114 	case AMDGPU_RING_TYPE_MES:
115 		me = 2;
116 		eng_sel = 5;
117 		break;
118 	default:
119 		WARN_ON(1);
120 	}
121 
122 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
123 	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
124 	amdgpu_ring_write(kiq_ring,
125 			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
126 			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
127 			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
128 			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
129 			  PACKET3_MAP_QUEUES_ME((me)) |
130 			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
131 			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
132 			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
133 			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
134 	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
135 	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
136 	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
137 	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
138 	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
139 }
140 
141 static void gfx_v12_1_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
142 				       struct amdgpu_ring *ring,
143 				       enum amdgpu_unmap_queues_action action,
144 				       u64 gpu_addr, u64 seq)
145 {
146 	struct amdgpu_device *adev = kiq_ring->adev;
147 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
148 
149 	if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
150 		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr,
151 					      seq, kiq_ring->xcc_id);
152 		return;
153 	}
154 
155 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
156 	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
157 			  PACKET3_UNMAP_QUEUES_ACTION(action) |
158 			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
159 			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
160 			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
161 	amdgpu_ring_write(kiq_ring,
162 		  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
163 
164 	if (action == PREEMPT_QUEUES_NO_UNMAP) {
165 		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
166 		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
167 		amdgpu_ring_write(kiq_ring, seq);
168 	} else {
169 		amdgpu_ring_write(kiq_ring, 0);
170 		amdgpu_ring_write(kiq_ring, 0);
171 		amdgpu_ring_write(kiq_ring, 0);
172 	}
173 }
174 
175 static void gfx_v12_1_kiq_query_status(struct amdgpu_ring *kiq_ring,
176 				       struct amdgpu_ring *ring,
177 				       u64 addr, u64 seq)
178 {
179 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
180 
181 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
182 	amdgpu_ring_write(kiq_ring,
183 			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
184 			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
185 			  PACKET3_QUERY_STATUS_COMMAND(2));
186 	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
187 			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
188 			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
189 	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
190 	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
191 	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
192 	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
193 }
194 
195 static void gfx_v12_1_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
196 					  uint16_t pasid,
197 					  uint32_t flush_type,
198 					  bool all_hub)
199 {
200 	gfx_v12_1_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
201 }
202 
203 static const struct kiq_pm4_funcs gfx_v12_1_kiq_pm4_funcs = {
204 	.kiq_set_resources = gfx_v12_1_kiq_set_resources,
205 	.kiq_map_queues = gfx_v12_1_kiq_map_queues,
206 	.kiq_unmap_queues = gfx_v12_1_kiq_unmap_queues,
207 	.kiq_query_status = gfx_v12_1_kiq_query_status,
208 	.kiq_invalidate_tlbs = gfx_v12_1_kiq_invalidate_tlbs,
209 	.set_resources_size = 8,
210 	.map_queues_size = 7,
211 	.unmap_queues_size = 6,
212 	.query_status_size = 7,
213 	.invalidate_tlbs_size = 2,
214 };
215 
216 static void gfx_v12_1_set_kiq_pm4_funcs(struct amdgpu_device *adev)
217 {
218 	int i, num_xcc;
219 
220 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
221 	for (i = 0; i < num_xcc; i++)
222 		adev->gfx.kiq[i].pmf = &gfx_v12_1_kiq_pm4_funcs;
223 }
224 
225 static void gfx_v12_1_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
226 				   int mem_space, int opt, uint32_t addr0,
227 				   uint32_t addr1, uint32_t ref,
228 				   uint32_t mask, uint32_t inv)
229 {
230 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
231 	amdgpu_ring_write(ring,
232 			  /* memory (1) or register (0) */
233 			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
234 			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
235 			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
236 			   WAIT_REG_MEM_ENGINE(eng_sel)));
237 
238 	if (mem_space)
239 		BUG_ON(addr0 & 0x3); /* Dword align */
240 	amdgpu_ring_write(ring, addr0);
241 	amdgpu_ring_write(ring, addr1);
242 	amdgpu_ring_write(ring, ref);
243 	amdgpu_ring_write(ring, mask);
244 	amdgpu_ring_write(ring, inv); /* poll interval */
245 }
246 
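/*
 * Basic ring test: seed SCRATCH_REG0 with 0xCAFEDEAD, emit a packet that
 * writes 0xDEADBEEF to it (a register write for KIQ, SET_UCONFIG_REG for
 * compute rings), then poll the register until the value shows up or the
 * timeout expires.
 */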
247 static int gfx_v12_1_ring_test_ring(struct amdgpu_ring *ring)
248 {
249 	struct amdgpu_device *adev = ring->adev;
250 	uint32_t scratch_reg0_offset, xcc_offset;
251 	uint32_t tmp = 0;
252 	unsigned i;
253 	int r;
254 
255 	/* Use register offset which is local to XCC in the packet */
256 	xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
257 	scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
258 	WREG32(scratch_reg0_offset, 0xCAFEDEAD);
259 	tmp = RREG32(scratch_reg0_offset);
260 
261 	r = amdgpu_ring_alloc(ring, 5);
262 	if (r) {
263 		dev_err(adev->dev,
264 			"amdgpu: cp failed to lock ring %d (%d).\n",
265 			ring->idx, r);
266 		return r;
267 	}
268 
269 	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
270 		gfx_v12_1_ring_emit_wreg(ring, xcc_offset, 0xDEADBEEF);
271 	} else {
272 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
273 		amdgpu_ring_write(ring, xcc_offset -
274 				  PACKET3_SET_UCONFIG_REG_START);
275 		amdgpu_ring_write(ring, 0xDEADBEEF);
276 	}
277 	amdgpu_ring_commit(ring);
278 
279 	for (i = 0; i < adev->usec_timeout; i++) {
280 		tmp = RREG32(scratch_reg0_offset);
281 		if (tmp == 0xDEADBEEF)
282 			break;
283 		if (amdgpu_emu_mode == 1)
284 			msleep(1);
285 		else
286 			udelay(1);
287 	}
288 
289 	if (i >= adev->usec_timeout)
290 		r = -ETIMEDOUT;
291 	return r;
292 }
293 
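/*
 * IB test: build a small indirect buffer containing a single WRITE_DATA
 * packet that stores 0xDEADBEEF into a writeback (WB) slot, schedule it
 * on the ring and check the slot once the fence signals.
 */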
294 static int gfx_v12_1_ring_test_ib(struct amdgpu_ring *ring, long timeout)
295 {
296 	struct amdgpu_device *adev = ring->adev;
297 	struct amdgpu_ib ib;
298 	struct dma_fence *f = NULL;
299 	unsigned index;
300 	uint64_t gpu_addr;
301 	volatile uint32_t *cpu_ptr;
302 	long r;
303 
304 	/* MES KIQ fw doesn't support indirect buffers for now */
305 	if (adev->enable_mes_kiq &&
306 	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
307 		return 0;
308 
309 	memset(&ib, 0, sizeof(ib));
310 
311 	r = amdgpu_device_wb_get(adev, &index);
312 	if (r)
313 		return r;
314 
315 	gpu_addr = adev->wb.gpu_addr + (index * 4);
316 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
317 	cpu_ptr = &adev->wb.wb[index];
318 
319 	r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
320 	if (r) {
321 		dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r);
322 		goto err1;
323 	}
324 
325 	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
326 	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
327 	ib.ptr[2] = lower_32_bits(gpu_addr);
328 	ib.ptr[3] = upper_32_bits(gpu_addr);
329 	ib.ptr[4] = 0xDEADBEEF;
330 	ib.length_dw = 5;
331 
332 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
333 	if (r)
334 		goto err2;
335 
336 	r = dma_fence_wait_timeout(f, false, timeout);
337 	if (r == 0) {
338 		r = -ETIMEDOUT;
339 		goto err2;
340 	} else if (r < 0) {
341 		goto err2;
342 	}
343 
344 	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
345 		r = 0;
346 	else
347 		r = -EINVAL;
348 err2:
349 	amdgpu_ib_free(&ib, NULL);
350 	dma_fence_put(f);
351 err1:
352 	amdgpu_device_wb_free(adev, index);
353 	return r;
354 }
355 
356 static void gfx_v12_1_free_microcode(struct amdgpu_device *adev)
357 {
358 	amdgpu_ucode_release(&adev->gfx.rlc_fw);
359 	amdgpu_ucode_release(&adev->gfx.mec_fw);
360 
361 	kfree(adev->gfx.rlc.register_list_format);
362 }
363 
364 static int gfx_v12_1_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
365 {
366 	const struct psp_firmware_header_v1_0 *toc_hdr;
367 	int err = 0;
368 
369 	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
370 				   AMDGPU_UCODE_REQUIRED,
371 				   "amdgpu/%s_toc.bin", ucode_prefix);
372 	if (err)
373 		goto out;
374 
375 	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
376 	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
377 	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
378 	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
379 	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
380 			le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
381 	return 0;
382 out:
383 	amdgpu_ucode_release(&adev->psp.toc_fw);
384 	return err;
385 }
386 
387 static int gfx_v12_1_init_microcode(struct amdgpu_device *adev)
388 {
389 	char ucode_prefix[15];
390 	int err;
391 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
392 	uint16_t version_major;
393 	uint16_t version_minor;
394 
395 	DRM_DEBUG("\n");
396 
397 	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
398 
399 	if (!amdgpu_sriov_vf(adev)) {
400 		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
401 					   AMDGPU_UCODE_REQUIRED,
402 					   "amdgpu/%s_rlc.bin", ucode_prefix);
403 		if (err)
404 			goto out;
405 		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
406 		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
407 		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
408 		err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
409 		if (err)
410 			goto out;
411 	}
412 
413 	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
414 				   AMDGPU_UCODE_REQUIRED,
415 				   "amdgpu/%s_mec.bin", ucode_prefix);
416 	if (err)
417 		goto out;
418 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
419 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
420 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
421 
422 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
423 		err = gfx_v12_1_init_toc_microcode(adev, ucode_prefix);
424 
425 	/* only one MEC for gfx 12 */
426 	adev->gfx.mec2_fw = NULL;
427 
428 	if (adev->gfx.imu.funcs) {
429 		if (adev->gfx.imu.funcs->init_microcode) {
430 			err = adev->gfx.imu.funcs->init_microcode(adev);
431 			if (err)
432 				dev_err(adev->dev, "Failed to load imu firmware!\n");
433 		}
434 	}
435 
436 out:
437 	if (err) {
438 		amdgpu_ucode_release(&adev->gfx.rlc_fw);
439 		amdgpu_ucode_release(&adev->gfx.mec_fw);
440 	}
441 
442 	return err;
443 }
444 
445 static u32 gfx_v12_1_get_csb_size(struct amdgpu_device *adev)
446 {
447 	u32 count = 0;
448 	const struct cs_section_def *sect = NULL;
449 	const struct cs_extent_def *ext = NULL;
450 
451 	count += 1;
452 
453 	for (sect = gfx12_cs_data; sect->section != NULL; ++sect) {
454 		if (sect->id == SECT_CONTEXT) {
455 			for (ext = sect->section; ext->extent != NULL; ++ext)
456 				count += 2 + ext->reg_count;
457 		} else
458 			return 0;
459 	}
460 
461 	return count;
462 }
463 
464 static void gfx_v12_1_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
465 {
466 	u32 count = 0, clustercount = 0, i;
467 	const struct cs_section_def *sect = NULL;
468 	const struct cs_extent_def *ext = NULL;
469 
470 	if (adev->gfx.rlc.cs_data == NULL)
471 		return;
472 	if (buffer == NULL)
473 		return;
474 
475 	count += 1;
476 
477 	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
478 		if (sect->id == SECT_CONTEXT) {
479 			for (ext = sect->section; ext->extent != NULL; ++ext) {
480 				clustercount++;
481 				buffer[count++] = ext->reg_count;
482 				buffer[count++] = ext->reg_index;
483 
484 				for (i = 0; i < ext->reg_count; i++)
485 					buffer[count++] = cpu_to_le32(ext->extent[i]);
486 			}
487 		} else
488 			return;
489 	}
490 
491 	buffer[0] = clustercount;
492 }
493 
494 static void gfx_v12_1_rlc_fini(struct amdgpu_device *adev)
495 {
496 	/* clear state block */
497 	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
498 			&adev->gfx.rlc.clear_state_gpu_addr,
499 			(void **)&adev->gfx.rlc.cs_ptr);
500 
501 	/* jump table block */
502 	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
503 			&adev->gfx.rlc.cp_table_gpu_addr,
504 			(void **)&adev->gfx.rlc.cp_table_ptr);
505 }
506 
507 static void gfx_v12_1_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
508 {
509 	int xcc_id, num_xcc;
510 	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
511 
512 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
513 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
514 		reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
515 		reg_access_ctrl->scratch_reg0 =
516 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0);
517 		reg_access_ctrl->scratch_reg1 =
518 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1);
519 		reg_access_ctrl->scratch_reg2 =
520 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2);
521 		reg_access_ctrl->scratch_reg3 =
522 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3);
523 		reg_access_ctrl->grbm_cntl =
524 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
525 		reg_access_ctrl->grbm_idx =
526 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
527 		reg_access_ctrl->spare_int =
528 			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT_0);
529 	}
530 	adev->gfx.rlc.rlcg_reg_access_supported = true;
531 }
532 
533 static int gfx_v12_1_rlc_init(struct amdgpu_device *adev)
534 {
535 	const struct cs_section_def *cs_data;
536 	int r, i, num_xcc;
537 
538 	adev->gfx.rlc.cs_data = gfx12_cs_data;
539 
540 	cs_data = adev->gfx.rlc.cs_data;
541 
542 	if (cs_data) {
543 		/* init clear state block */
544 		r = amdgpu_gfx_rlc_init_csb(adev);
545 		if (r)
546 			return r;
547 	}
548 
549 	/* init spm vmid with 0xf */
550 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
551 	for (i = 0; i < num_xcc; i++) {
552 		if (adev->gfx.rlc.funcs->update_spm_vmid)
553 			adev->gfx.rlc.funcs->update_spm_vmid(adev, i, NULL, 0xf);
554 	}
555 
556 	return 0;
557 }
558 
559 static void gfx_v12_1_mec_fini(struct amdgpu_device *adev)
560 {
561 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
562 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
563 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
564 }
565 
566 static int gfx_v12_1_mec_init(struct amdgpu_device *adev)
567 {
568 	int r, i, num_xcc;
569 	u32 *hpd;
570 	size_t mec_hpd_size;
571 
572 	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
573 
574 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
575 	for (i = 0; i < num_xcc; i++)
576 		bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
577 			    AMDGPU_MAX_COMPUTE_QUEUES);
578 
579 	/* take ownership of the relevant compute queues */
580 	amdgpu_gfx_compute_queue_acquire(adev);
581 	mec_hpd_size = adev->gfx.num_compute_rings *
582 		       GFX12_MEC_HPD_SIZE * num_xcc;
583 
584 	if (mec_hpd_size) {
585 		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
586 					      AMDGPU_GEM_DOMAIN_GTT,
587 					      &adev->gfx.mec.hpd_eop_obj,
588 					      &adev->gfx.mec.hpd_eop_gpu_addr,
589 					      (void **)&hpd);
590 		if (r) {
591 			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
592 			gfx_v12_1_mec_fini(adev);
593 			return r;
594 		}
595 
596 		memset(hpd, 0, mec_hpd_size);
597 
598 		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
599 		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
600 	}
601 
602 	return 0;
603 }
604 
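/*
 * Wave state is read through the SQ indirect register interface: program
 * SQ_IND_INDEX with the wave id (and optionally the work-item id and
 * auto-increment) plus the register index, then read the values back from
 * SQ_IND_DATA.
 */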
605 static uint32_t wave_read_ind(struct amdgpu_device *adev,
606 			      uint32_t xcc_id, uint32_t wave,
607 			      uint32_t address)
608 {
609 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
610 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
611 		(address << SQ_IND_INDEX__INDEX__SHIFT));
612 	return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
613 }
614 
615 static void wave_read_regs(struct amdgpu_device *adev,
616 			   uint32_t xcc_id, uint32_t wave,
617 			   uint32_t thread, uint32_t regno,
618 			   uint32_t num, uint32_t *out)
619 {
620 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
621 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
622 		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
623 		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
624 		(SQ_IND_INDEX__AUTO_INCR_MASK));
625 	while (num--)
626 		*(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
627 }
628 
629 static void gfx_v12_1_read_wave_data(struct amdgpu_device *adev,
630 				     uint32_t xcc_id,
631 				     uint32_t simd, uint32_t wave,
632 				     uint32_t *dst, int *no_fields)
633 {
634 	/* in gfx12 the SIMD_ID is specified as part of the INSTANCE
635 	 * field when performing a select_se_sh so it should be
636 	 * zero here */
637 	WARN_ON(simd != 0);
638 
639 	/* type 4 wave data */
640 	dst[(*no_fields)++] = 4;
641 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_STATUS);
642 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_PC_LO);
643 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_PC_HI);
644 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXEC_LO);
645 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXEC_HI);
646 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_HW_ID1);
647 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_HW_ID2);
648 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_GPR_ALLOC);
649 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_LDS_ALLOC);
650 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_IB_STS);
651 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_IB_STS2);
652 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_IB_DBG1);
653 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_M0);
654 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_MODE);
655 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_STATE_PRIV);
656 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXCP_FLAG_PRIV);
657 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXCP_FLAG_USER);
658 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_TRAP_CTRL);
659 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_ACTIVE);
660 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_VALID_AND_IDLE);
661 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_DVGPR_ALLOC_LO);
662 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_DVGPR_ALLOC_HI);
663 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_SCHED_MODE);
664 }
665 
666 static void gfx_v12_1_read_wave_sgprs(struct amdgpu_device *adev,
667 				      uint32_t xcc_id, uint32_t simd,
668 				      uint32_t wave, uint32_t start,
669 				      uint32_t size, uint32_t *dst)
670 {
671 	WARN_ON(simd != 0);
672 
673 	wave_read_regs(adev, xcc_id, wave, 0,
674 		       start + SQIND_WAVE_SGPRS_OFFSET,
675 		       size, dst);
676 }
677 
678 static void gfx_v12_1_read_wave_vgprs(struct amdgpu_device *adev,
679 				      uint32_t xcc_id, uint32_t simd,
680 				      uint32_t wave, uint32_t thread,
681 				      uint32_t start, uint32_t size,
682 				      uint32_t *dst)
683 {
684 	wave_read_regs(adev, xcc_id, wave, thread,
685 		       start + SQIND_WAVE_VGPRS_OFFSET,
686 		       size, dst);
687 }
688 
689 static void gfx_v12_1_select_me_pipe_q(struct amdgpu_device *adev,
690 				       u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
691 {
692 	soc_v1_0_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
693 }
694 
695 static const struct amdgpu_gfx_funcs gfx_v12_1_gfx_funcs = {
696 	.get_gpu_clock_counter = &gfx_v12_1_get_gpu_clock_counter,
697 	.select_se_sh = &gfx_v12_1_xcc_select_se_sh,
698 	.read_wave_data = &gfx_v12_1_read_wave_data,
699 	.read_wave_sgprs = &gfx_v12_1_read_wave_sgprs,
700 	.read_wave_vgprs = &gfx_v12_1_read_wave_vgprs,
701 	.select_me_pipe_q = &gfx_v12_1_select_me_pipe_q,
702 	.update_perfmon_mgcg = &gfx_v12_1_update_perf_clk,
703 };
704 
705 static int gfx_v12_1_gpu_early_init(struct amdgpu_device *adev)
706 {
707 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
708 	case IP_VERSION(12, 1, 0):
709 		adev->gfx.config.max_hw_contexts = 8;
710 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
711 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
712 		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
713 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
714 		break;
715 	default:
716 		BUG();
717 		break;
718 	}
719 
720 	return 0;
721 }
722 
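/*
 * Compute rings are laid out per XCC: each ring gets a doorbell relative
 * to mec_ring0 within its XCC's doorbell range and its own slice of the
 * shared HPD/EOP buffer.  The doorbell index is shifted left by one,
 * presumably because compute queues use 64-bit doorbells addressed in
 * 32-bit units.
 */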
723 static int gfx_v12_1_compute_ring_init(struct amdgpu_device *adev, int ring_id,
724 				       int xcc_id, int mec, int pipe, int queue)
725 {
726 	int r;
727 	unsigned irq_type;
728 	struct amdgpu_ring *ring;
729 	unsigned int hw_prio;
730 	uint32_t xcc_doorbell_start;
731 
732 	ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
733 				       ring_id];
734 
735 	/* mec0 is me1 */
736 	ring->xcc_id = xcc_id;
737 	ring->me = mec + 1;
738 	ring->pipe = pipe;
739 	ring->queue = queue;
740 
741 	ring->ring_obj = NULL;
742 	ring->use_doorbell = true;
743 	xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
744 			     xcc_id * adev->doorbell_index.xcc_doorbell_range;
745 	ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
746 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
747 			     (ring_id + xcc_id * adev->gfx.num_compute_rings) *
748 			     GFX12_MEC_HPD_SIZE;
749 	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
750 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
751 
752 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
753 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
754 		+ ring->pipe;
755 	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
756 			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
757 	/* type-2 packets are deprecated on MEC, use type-3 instead */
758 	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
759 			     hw_prio, NULL);
760 	if (r)
761 		return r;
762 
763 	return 0;
764 }
765 
766 static struct {
767 	SOC24_FIRMWARE_ID	id;
768 	unsigned int		offset;
769 	unsigned int		size;
770 	unsigned int		size_x16;
771 	unsigned int		num_inst;
772 } rlc_autoload_info[SOC24_FIRMWARE_ID_MAX];
773 
774 #define RLC_TOC_OFFSET_DWUNIT   8
775 #define RLC_SIZE_MULTIPLE       1024
776 #define RLC_TOC_UMF_SIZE_inM	23ULL
777 #define RLC_TOC_FORMAT_API	165ULL
778 
779 #define RLC_NUM_INS_CODE0   1
780 #define RLC_NUM_INS_CODE1   8
781 #define RLC_NUM_INS_CODE2   2
782 #define RLC_NUM_INS_CODE3   16
783 
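/*
 * The RLC TOC is an array of RLC_TABLE_OF_CONTENT_V2 entries terminated by
 * an invalid id.  Offsets are stored in 8-dword units, sizes in dwords (or
 * in RLC_SIZE_MULTIPLE-dword blocks when size_x16 is set), and
 * vfflr_image_code selects how many instances of the image are expected.
 */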
784 static void gfx_v12_1_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
785 {
786 	RLC_TABLE_OF_CONTENT_V2 *ucode = rlc_toc;
787 
788 	while (ucode && (ucode->id > SOC24_FIRMWARE_ID_INVALID)) {
789 		rlc_autoload_info[ucode->id].id = ucode->id;
790 		rlc_autoload_info[ucode->id].offset =
791 			ucode->offset * RLC_TOC_OFFSET_DWUNIT * 4;
792 		rlc_autoload_info[ucode->id].size =
793 			ucode->size_x16 ? ucode->size * RLC_SIZE_MULTIPLE * 4 :
794 					  ucode->size * 4;
795 		switch (ucode->vfflr_image_code) {
796 		case 0:
797 			rlc_autoload_info[ucode->id].num_inst =
798 				RLC_NUM_INS_CODE0;
799 			break;
800 		case 1:
801 			rlc_autoload_info[ucode->id].num_inst =
802 				RLC_NUM_INS_CODE1;
803 			break;
804 		case 2:
805 			rlc_autoload_info[ucode->id].num_inst =
806 				RLC_NUM_INS_CODE2;
807 			break;
808 		case 3:
809 			rlc_autoload_info[ucode->id].num_inst =
810 				RLC_NUM_INS_CODE3;
811 			break;
812 		default:
813 			dev_err(adev->dev,
814 				"Invalid Instance number detected\n");
815 			break;
816 		}
817 		ucode++;
818 	}
819 }
820 
821 static uint32_t gfx_v12_1_calc_toc_total_size(struct amdgpu_device *adev)
822 {
823 	uint32_t total_size = 0;
824 	SOC24_FIRMWARE_ID id;
825 
826 	gfx_v12_1_parse_rlc_toc(adev, adev->psp.toc.start_addr);
827 
828 	for (id = SOC24_FIRMWARE_ID_RLC_G_UCODE; id < SOC24_FIRMWARE_ID_MAX; id++)
829 		total_size += rlc_autoload_info[id].size;
830 
831 	/* toc offsets may be aligned, so account for where the last image ends */
832 	if (total_size < rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].offset)
833 		total_size = rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].offset +
834 			rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].size;
835 	if (total_size < (RLC_TOC_UMF_SIZE_inM << 20))
836 		total_size = RLC_TOC_UMF_SIZE_inM << 20;
837 
838 	return total_size;
839 }
840 
841 static int gfx_v12_1_rlc_autoload_buffer_init(struct amdgpu_device *adev)
842 {
843 	int r;
844 	uint32_t total_size;
845 
846 	total_size = gfx_v12_1_calc_toc_total_size(adev);
847 
848 	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
849 				      AMDGPU_GEM_DOMAIN_VRAM,
850 				      &adev->gfx.rlc.rlc_autoload_bo,
851 				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
852 				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
853 
854 	if (r) {
855 		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
856 		return r;
857 	}
858 
859 	return 0;
860 }
861 
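/*
 * Copy one firmware image into the autoload buffer at the offset the TOC
 * assigned to @id.  The image is replicated once per expected instance and
 * the remainder of each instance slot is zero-filled.
 */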
862 static void gfx_v12_1_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
863 						       SOC24_FIRMWARE_ID id,
864 						       const void *fw_data,
865 						       uint32_t fw_size)
866 {
867 	uint32_t toc_offset;
868 	uint32_t toc_fw_size, toc_fw_inst_size;
869 	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;
870 	int i, num_inst;
871 
872 	if (id <= SOC24_FIRMWARE_ID_INVALID || id >= SOC24_FIRMWARE_ID_MAX)
873 		return;
874 
875 	toc_offset = rlc_autoload_info[id].offset;
876 	toc_fw_size = rlc_autoload_info[id].size;
877 	num_inst = rlc_autoload_info[id].num_inst;
878 	toc_fw_inst_size = toc_fw_size / num_inst;
879 
880 	if (fw_size == 0)
881 		fw_size = toc_fw_inst_size;
882 
883 	if (fw_size > toc_fw_inst_size)
884 		fw_size = toc_fw_inst_size;
885 
886 	for (i = 0; i < num_inst; i++) {
887 		memcpy(ptr + toc_offset + i * toc_fw_inst_size, fw_data, fw_size);
888 
889 		if (fw_size < toc_fw_inst_size)
890 			memset(ptr + toc_offset + fw_size + i * toc_fw_inst_size,
891 			       0, toc_fw_inst_size - fw_size);
892 	}
893 }
894 
895 static void
896 gfx_v12_1_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev)
897 {
898 	void *data;
899 	uint32_t size;
900 	uint32_t *toc_ptr;
901 
902 	data = adev->psp.toc.start_addr;
903 	size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_TOC].size;
904 
905 	toc_ptr = (uint32_t *)data + size / 4 - 2;
906 	*toc_ptr = (RLC_TOC_FORMAT_API << 24) | 0x1;
907 
908 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_TOC,
909 						   data, size);
910 }
911 
912 static void
913 gfx_v12_1_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev)
914 {
915 	const __le32 *fw_data;
916 	uint32_t fw_size;
917 	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
918 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
919 	const struct rlc_firmware_header_v2_1 *rlcv21_hdr;
920 	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
921 	uint16_t version_major, version_minor;
922 
923 	/* mec ucode */
924 	cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
925 		adev->gfx.mec_fw->data;
926 	/* instruction */
927 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
928 		le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
929 	fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
930 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC,
931 						   fw_data, fw_size);
932 	/* data */
933 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
934 		le32_to_cpu(cpv2_hdr->data_offset_bytes));
935 	fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
936 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P0_STACK,
937 						   fw_data, fw_size);
938 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P1_STACK,
939 						   fw_data, fw_size);
940 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P2_STACK,
941 						   fw_data, fw_size);
942 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P3_STACK,
943 						   fw_data, fw_size);
944 
945 	/* rlc ucode */
946 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
947 		adev->gfx.rlc_fw->data;
948 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
949 			le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
950 	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
951 	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_G_UCODE,
952 						   fw_data, fw_size);
953 
954 	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
955 	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
956 	if (version_major == 2) {
957 		if (version_minor >= 1) {
958 			rlcv21_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
959 
960 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
961 					le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_offset_bytes));
962 			fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_size_bytes);
963 			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLCG_SCRATCH,
964 						   fw_data, fw_size);
965 
966 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
967 					le32_to_cpu(rlcv21_hdr->save_restore_list_srm_offset_bytes));
968 			fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_srm_size_bytes);
969 			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_SRM_ARAM,
970 						   fw_data, fw_size);
971 		}
972 		if (version_minor >= 2) {
973 			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
974 
975 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
976 					le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
977 			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
978 			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_UCODE,
979 						   fw_data, fw_size);
980 
981 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
982 					le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
983 			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
984 			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_DRAM_BOOT,
985 						   fw_data, fw_size);
986 		}
987 	}
988 }
989 
990 static void
991 gfx_v12_1_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev)
992 {
993 	const __le32 *fw_data;
994 	uint32_t fw_size;
995 	const struct sdma_firmware_header_v3_0 *sdma_hdr;
996 
997 	if (adev->sdma.instance[0].fw) {
998 		sdma_hdr = (const struct sdma_firmware_header_v3_0 *)
999 			adev->sdma.instance[0].fw->data;
1000 		fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
1001 				le32_to_cpu(sdma_hdr->ucode_offset_bytes));
1002 		fw_size = le32_to_cpu(sdma_hdr->ucode_size_bytes);
1003 
1004 		gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_SDMA_UCODE_TH0,
1005 							   fw_data, fw_size);
1006 	}
1007 }
1008 
1009 static void
1010 gfx_v12_1_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev)
1011 {
1012 	const __le32 *fw_data;
1013 	unsigned fw_size;
1014 	const struct mes_firmware_header_v1_0 *mes_hdr;
1015 	int pipe, ucode_id, data_id;
1016 
1017 	for (pipe = 0; pipe < 2; pipe++) {
1018 		if (pipe == 0) {
1019 			ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P0;
1020 			data_id  = SOC24_FIRMWARE_ID_RS64_MES_P0_STACK;
1021 		} else {
1022 			ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P1;
1023 			data_id  = SOC24_FIRMWARE_ID_RS64_MES_P1_STACK;
1024 		}
1025 
1026 		mes_hdr = (const struct mes_firmware_header_v1_0 *)
1027 			adev->mes.fw[pipe]->data;
1028 
1029 		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
1030 				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
1031 		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
1032 
1033 		gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, ucode_id, fw_data, fw_size);
1034 
1035 		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
1036 				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
1037 		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
1038 
1039 		gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, data_id, fw_data, fw_size);
1040 	}
1041 }
1042 
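/*
 * Backdoor autoload: copy all firmware images (SDMA, MEC, RLC, MES, TOC)
 * into the autoload buffer at the offsets assigned by the TOC, point the
 * IMU bootloader registers at the RLC_G image, then either let the IMU
 * load and start the firmware or, without IMU/dpm, unhalt the RLC
 * directly.
 */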
1043 static int gfx_v12_1_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
1044 {
1045 	uint32_t rlc_g_offset, rlc_g_size;
1046 	uint64_t gpu_addr;
1047 	uint32_t data;
1048 
1049 	/* RLC autoload sequence 2: copy ucode */
1050 	gfx_v12_1_rlc_backdoor_autoload_copy_sdma_ucode(adev);
1051 	gfx_v12_1_rlc_backdoor_autoload_copy_gfx_ucode(adev);
1052 	gfx_v12_1_rlc_backdoor_autoload_copy_mes_ucode(adev);
1053 	gfx_v12_1_rlc_backdoor_autoload_copy_toc_ucode(adev);
1054 
1055 	rlc_g_offset = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].offset;
1056 	rlc_g_size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].size;
1057 	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset - adev->gmc.vram_start;
1058 
1059 	WREG32_SOC15(GC, GET_INST(GC, 0),
1060 		     regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr));
1061 	WREG32_SOC15(GC, GET_INST(GC, 0),
1062 		     regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr));
1063 
1064 	WREG32_SOC15(GC, GET_INST(GC, 0),
1065 		     regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size);
1066 
1067 	if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
1068 		/* RLC autoload sequence 3: load IMU fw */
1069 		if (adev->gfx.imu.funcs->load_microcode)
1070 			adev->gfx.imu.funcs->load_microcode(adev);
1071 		/* RLC autoload sequence 4 init IMU fw */
1072 		if (adev->gfx.imu.funcs->setup_imu)
1073 			adev->gfx.imu.funcs->setup_imu(adev);
1074 		if (adev->gfx.imu.funcs->start_imu)
1075 			adev->gfx.imu.funcs->start_imu(adev);
1076 
1077 		/* RLC autoload sequence 5 disable gpa mode */
1078 		gfx_v12_1_xcc_disable_gpa_mode(adev, 0);
1079 	} else {
1080 		/* unhalt rlc to start autoload without imu */
1081 		data = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPM_THREAD_ENABLE);
1082 		data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD0_ENABLE, 1);
1083 		data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1);
1084 		WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPM_THREAD_ENABLE, data);
1085 		WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
1086 	}
1087 
1088 	return 0;
1089 }
1090 
1091 static int gfx_v12_1_sw_init(struct amdgpu_ip_block *ip_block)
1092 {
1093 	int i, j, k, r, ring_id = 0;
1094 	unsigned num_compute_rings;
1095 	int xcc_id, num_xcc;
1096 	struct amdgpu_device *adev = ip_block->adev;
1097 
1098 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1099 	case IP_VERSION(12, 1, 0):
1100 		adev->gfx.mec.num_mec = 1;
1101 		adev->gfx.mec.num_pipe_per_mec = 4;
1102 		adev->gfx.mec.num_queue_per_pipe = 8;
1103 		break;
1104 	default:
1105 		adev->gfx.mec.num_mec = 2;
1106 		adev->gfx.mec.num_pipe_per_mec = 2;
1107 		adev->gfx.mec.num_queue_per_pipe = 4;
1108 		break;
1109 	}
1110 
1111 	/* recalculate compute rings to use based on hardware configuration */
1112 	num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
1113 			     adev->gfx.mec.num_queue_per_pipe) / 2;
1114 	adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
1115 					  num_compute_rings);
1116 
1117 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1118 
1119 	/* EOP Event */
1120 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1121 			      GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
1122 			      &adev->gfx.eop_irq);
1123 	if (r)
1124 		return r;
1125 
1126 	/* Privileged reg */
1127 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1128 			      GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
1129 			      &adev->gfx.priv_reg_irq);
1130 	if (r)
1131 		return r;
1132 
1133 	/* Privileged inst */
1134 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1135 			      GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT,
1136 			      &adev->gfx.priv_inst_irq);
1137 	if (r)
1138 		return r;
1139 
1140 	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1141 
1142 	r = gfx_v12_1_rlc_init(adev);
1143 	if (r) {
1144 		dev_err(adev->dev, "Failed to init rlc BOs!\n");
1145 		return r;
1146 	}
1147 
1148 	r = gfx_v12_1_mec_init(adev);
1149 	if (r) {
1150 		dev_err(adev->dev, "Failed to init MEC BOs!\n");
1151 		return r;
1152 	}
1153 
1154 	/* set up the compute queues - allocate horizontally across pipes */
1155 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
1156 		ring_id = 0;
1157 		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1158 			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1159 				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
1160 					if (!amdgpu_gfx_is_mec_queue_enabled(adev,
1161 								xcc_id, i, k, j))
1162 						continue;
1163 
1164 					r = gfx_v12_1_compute_ring_init(adev, ring_id,
1165 								xcc_id, i, k, j);
1166 					if (r)
1167 						return r;
1168 
1169 					ring_id++;
1170 				}
1171 			}
1172 		}
1173 
1174 		if (!adev->enable_mes_kiq) {
1175 			r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, xcc_id);
1176 			if (r) {
1177 				dev_err(adev->dev, "Failed to init KIQ BOs!\n");
1178 				return r;
1179 			}
1180 
1181 			r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
1182 			if (r)
1183 				return r;
1184 		}
1185 
1186 		r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v12_1_compute_mqd), xcc_id);
1187 		if (r)
1188 			return r;
1189 	}
1190 
1191 	/* allocate visible FB for rlc auto-loading fw */
1192 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1193 		r = gfx_v12_1_rlc_autoload_buffer_init(adev);
1194 		if (r)
1195 			return r;
1196 	}
1197 
1198 	r = gfx_v12_1_gpu_early_init(adev);
1199 	if (r)
1200 		return r;
1201 
1202 	return 0;
1203 }
1204 
1205 static void gfx_v12_1_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
1206 {
1207 	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
1208 			&adev->gfx.rlc.rlc_autoload_gpu_addr,
1209 			(void **)&adev->gfx.rlc.rlc_autoload_ptr);
1210 }
1211 
1212 static int gfx_v12_1_sw_fini(struct amdgpu_ip_block *ip_block)
1213 {
1214 	int i, num_xcc;
1215 	struct amdgpu_device *adev = ip_block->adev;
1216 
1217 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1218 	for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
1219 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1220 
1221 	for (i = 0; i < num_xcc; i++) {
1222 		amdgpu_gfx_mqd_sw_fini(adev, i);
1223 
1224 		if (!adev->enable_mes_kiq) {
1225 			amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
1226 			amdgpu_gfx_kiq_fini(adev, i);
1227 		}
1228 	}
1229 
1230 	gfx_v12_1_rlc_fini(adev);
1231 	gfx_v12_1_mec_fini(adev);
1232 
1233 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
1234 		gfx_v12_1_rlc_autoload_buffer_fini(adev);
1235 
1236 	gfx_v12_1_free_microcode(adev);
1237 
1238 	return 0;
1239 }
1240 
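/*
 * Program GRBM_GFX_INDEX for the given XCC.  Passing 0xffffffff for
 * se_num, sh_num or instance selects broadcast writes to all SEs, SAs or
 * instances instead of indexing a single one.
 */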
1241 static void gfx_v12_1_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
1242 				       u32 sh_num, u32 instance, int xcc_id)
1243 {
1244 	u32 data;
1245 
1246 	if (instance == 0xffffffff)
1247 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
1248 				     INSTANCE_BROADCAST_WRITES, 1);
1249 	else
1250 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
1251 				     instance);
1252 
1253 	if (se_num == 0xffffffff)
1254 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
1255 				     1);
1256 	else
1257 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
1258 
1259 	if (sh_num == 0xffffffff)
1260 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
1261 				     1);
1262 	else
1263 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);
1264 
1265 	WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
1266 }
1267 
1268 static u32 gfx_v12_1_get_sa_active_bitmap(struct amdgpu_device *adev,
1269 					  int xcc_id)
1270 {
1271 	u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;
1272 
1273 	gc_disabled_sa_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SA_UNIT_DISABLE);
1274 	gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
1275 					    CC_GC_SA_UNIT_DISABLE,
1276 					    SA_DISABLE);
1277 	gc_user_disabled_sa_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SA_UNIT_DISABLE);
1278 	gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
1279 						 GC_USER_SA_UNIT_DISABLE,
1280 						 SA_DISABLE);
1281 	sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
1282 					    adev->gfx.config.max_shader_engines);
1283 
1284 	return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
1285 }
1286 
1287 static u32 gfx_v12_1_get_rb_active_bitmap(struct amdgpu_device *adev,
1288 					  int xcc_id)
1289 {
1290 	u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
1291 	u32 rb_mask;
1292 
1293 	gc_disabled_rb_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
1294 					   regCC_RB_BACKEND_DISABLE);
1295 	gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
1296 					    CC_RB_BACKEND_DISABLE,
1297 					    BACKEND_DISABLE);
1298 	gc_user_disabled_rb_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
1299 						regGC_USER_RB_BACKEND_DISABLE);
1300 	gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
1301 						 GC_USER_RB_BACKEND_DISABLE,
1302 						 BACKEND_DISABLE);
1303 	rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
1304 					    adev->gfx.config.max_shader_engines);
1305 
1306 	return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
1307 }
1308 
1309 static void gfx_v12_1_setup_rb(struct amdgpu_device *adev)
1310 {
1311 	u32 rb_bitmap_width_per_sa;
1312 	u32 max_sa;
1313 	u32 active_sa_bitmap;
1314 	u32 global_active_rb_bitmap;
1315 	u32 active_rb_bitmap = 0;
1316 	u32 i;
1317 	int xcc_id;
1318 
1319 	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
1320 		/* query sa bitmap from SA_UNIT_DISABLE registers */
1321 		active_sa_bitmap = gfx_v12_1_get_sa_active_bitmap(adev, xcc_id);
1322 		/* query rb bitmap from RB_BACKEND_DISABLE registers */
1323 		global_active_rb_bitmap = gfx_v12_1_get_rb_active_bitmap(adev, xcc_id);
1324 
1325 		/* generate active rb bitmap according to active sa bitmap */
1326 		max_sa = adev->gfx.config.max_shader_engines *
1327 			 adev->gfx.config.max_sh_per_se;
1328 		rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
1329 					 adev->gfx.config.max_sh_per_se;
1330 		for (i = 0; i < max_sa; i++) {
1331 			if (active_sa_bitmap & (1 << i))
1332 				active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
1333 		}
1334 
1335 		active_rb_bitmap |= global_active_rb_bitmap;
1336 	}
1337 
1338 	adev->gfx.config.backend_enable_mask = active_rb_bitmap;
1339 	adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
1340 }
1341 
1342 #define LDS_APP_BASE           0x1
1343 #define SCRATCH_APP_BASE       0x2
1344 
1345 static void gfx_v12_1_xcc_init_compute_vmid(struct amdgpu_device *adev,
1346 					    int xcc_id)
1347 {
1348 	int i;
1349 	uint32_t sh_mem_bases;
1350 	uint32_t data;
1351 
1352 	/*
1353 	 * Configure apertures:
1354 	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
1355 	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
1356 	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
1357 	 */
1358 	sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) |
1359 			SCRATCH_APP_BASE;
1360 
1361 	mutex_lock(&adev->srbm_mutex);
1362 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1363 		soc_v1_0_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
1364 		/* CP and shaders */
1365 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
1366 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);
1367 
1368 		/* Enable trap for each kfd vmid. */
1369 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
1370 		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
1371 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);
1372 	}
1373 	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1374 	mutex_unlock(&adev->srbm_mutex);
1375 }
1376 
1377 static void gfx_v12_1_tcp_harvest(struct amdgpu_device *adev)
1378 {
1379 	/* TODO: harvest feature to be added later. */
1380 }
1381 
1382 static void gfx_v12_1_get_tcc_info(struct amdgpu_device *adev)
1383 {
1384 }
1385 
1386 static void gfx_v12_1_xcc_constants_init(struct amdgpu_device *adev,
1387 					 int xcc_id)
1388 {
1389 	u32 tmp;
1390 	int i;
1391 
1392 	/* XXX SH_MEM regs */
1393 	/* where to put LDS, scratch, GPUVM in FSA64 space */
1394 	mutex_lock(&adev->srbm_mutex);
1395 	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
1396 		soc_v1_0_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
1397 		/* CP and shaders */
1398 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1399 			     regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
1400 		if (i != 0) {
1401 			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1402 				(adev->gmc.private_aperture_start >> 48));
1403 			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
1404 				(adev->gmc.shared_aperture_start >> 48));
1405 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, tmp);
1406 		}
1407 	}
1408 	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0));
1409 
1410 	mutex_unlock(&adev->srbm_mutex);
1411 
1412 	gfx_v12_1_xcc_init_compute_vmid(adev, xcc_id);
1413 }
1414 
1415 static void gfx_v12_1_constants_init(struct amdgpu_device *adev)
1416 {
1417 	int i, num_xcc;
1418 
1419 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1420 
1421 	gfx_v12_1_setup_rb(adev);
1422 	gfx_v12_1_get_cu_info(adev, &adev->gfx.cu_info);
1423 	gfx_v12_1_get_tcc_info(adev);
1424 	adev->gfx.config.pa_sc_tile_steering_override = 0;
1425 
1426 	for (i = 0; i < num_xcc; i++)
1427 		gfx_v12_1_xcc_constants_init(adev, i);
1428 }
1429 
1430 static void gfx_v12_1_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1431 						    bool enable, int xcc_id)
1432 {
1433 	u32 tmp;
1434 
1435 	if (amdgpu_sriov_vf(adev))
1436 		return;
1437 
1438 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);
1439 
1440 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
1441 			    enable ? 1 : 0);
1442 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
1443 			    enable ? 1 : 0);
1444 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
1445 			    enable ? 1 : 0);
1446 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
1447 			    enable ? 1 : 0);
1448 
1449 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
1450 }
1451 
1452 static int gfx_v12_1_xcc_init_csb(struct amdgpu_device *adev,
1453 				  int xcc_id)
1454 {
1455 	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
1456 
1457 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CSIB_ADDR_HI,
1458 			adev->gfx.rlc.clear_state_gpu_addr >> 32);
1459 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CSIB_ADDR_LO,
1460 			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
1461 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1462 		     regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
1463 
1464 	return 0;
1465 }
1466 
1467 static void gfx_v12_1_xcc_rlc_stop(struct amdgpu_device *adev,
1468 				   int xcc_id)
1469 {
1470 	u32 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CNTL);
1471 
1472 	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
1473 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CNTL, tmp);
1474 }
1475 
1476 static void gfx_v12_1_rlc_stop(struct amdgpu_device *adev)
1477 {
1478 	int i, num_xcc;
1479 
1480 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1481 	for (i = 0; i < num_xcc; i++)
1482 		gfx_v12_1_xcc_rlc_stop(adev, i);
1483 }
1484 
1485 static void gfx_v12_1_xcc_rlc_reset(struct amdgpu_device *adev,
1486 				    int xcc_id)
1487 {
1488 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id),
1489 			      GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
1490 	udelay(50);
1491 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id),
1492 			      GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
1493 	udelay(50);
1494 }
1495 
1496 static void gfx_v12_1_rlc_reset(struct amdgpu_device *adev)
1497 {
1498 	int i, num_xcc;
1499 
1500 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1501 	for (i = 0; i < num_xcc; i++)
1502 		gfx_v12_1_xcc_rlc_reset(adev, i);
1503 }
1504 
1505 static void gfx_v12_1_xcc_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
1506 						 bool enable, int xcc_id)
1507 {
1508 	uint32_t rlc_pg_cntl;
1509 
1510 	rlc_pg_cntl = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_PG_CNTL);
1511 
1512 	if (!enable) {
1513 		/* RLC_PG_CNTL[23] = 0 (default)
1514 		 * RLC will wait for handshake acks with SMU
1515 		 * GFXOFF will be enabled
1516 		 * RLC_PG_CNTL[23] = 1
1517 		 * RLC will not issue any message to SMU
1518 		 * hence no handshake between SMU & RLC
1519 		 * GFXOFF will be disabled
1520 		 */
1521 		rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
1522 	} else
1523 		rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
1524 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_PG_CNTL, rlc_pg_cntl);
1525 }
1526 
1527 static void gfx_v12_1_xcc_rlc_start(struct amdgpu_device *adev,
1528 				    int xcc_id)
1529 {
1530 	/* TODO: enable rlc & smu handshake until smu
1531 	 * and gfxoff features work as expected */
1532 	if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
1533 		gfx_v12_1_xcc_rlc_smu_handshake_cntl(adev, false, xcc_id);
1534 
1535 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL, RLC_ENABLE_F32, 1);
1536 	udelay(50);
1537 }
1538 
1539 static void gfx_v12_1_rlc_start(struct amdgpu_device *adev)
1540 {
1541 	int i, num_xcc;
1542 
1543 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1544 	for (i = 0; i < num_xcc; i++) {
1545 		gfx_v12_1_xcc_rlc_start(adev, i);
1546 	}
1547 }
1548 
1549 static void gfx_v12_1_xcc_rlc_enable_srm(struct amdgpu_device *adev,
1550 					 int xcc_id)
1551 {
1552 	uint32_t tmp;
1553 
1554 	/* enable Save Restore Machine */
1555 	tmp = RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SRM_CNTL));
1556 	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1557 	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
1558 	WREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SRM_CNTL), tmp);
1559 }
1560 
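/*
 * Front-door RLCG load: write the ucode dwords one at a time through
 * RLC_GPM_UCODE_ADDR/DATA starting at RLCG_UCODE_LOADING_START_ADDRESS,
 * then write the firmware version back to the address register.
 */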
1561 static void gfx_v12_1_xcc_load_rlcg_microcode(struct amdgpu_device *adev,
1562 					      int xcc_id)
1563 {
1564 	const struct rlc_firmware_header_v2_0 *hdr;
1565 	const __le32 *fw_data;
1566 	unsigned i, fw_size;
1567 
1568 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1569 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1570 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1571 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1572 
1573 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
1574 		     RLCG_UCODE_LOADING_START_ADDRESS);
1575 
1576 	for (i = 0; i < fw_size; i++)
1577 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1578 			     regRLC_GPM_UCODE_DATA,
1579 			     le32_to_cpup(fw_data++));
1580 
1581 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1582 		     regRLC_GPM_UCODE_ADDR,
1583 		     adev->gfx.rlc_fw_version);
1584 }
1585 
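/*
 * Load the RLC v2_2 IRAM and DRAM ucode sections into the LX6 core and
 * release it from reset (BRESET = 0).
 */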
1586 static void gfx_v12_1_xcc_load_rlc_iram_dram_microcode(struct amdgpu_device *adev,
1587 						       int xcc_id)
1588 {
1589 	const struct rlc_firmware_header_v2_2 *hdr;
1590 	const __le32 *fw_data;
1591 	unsigned i, fw_size;
1592 	u32 tmp;
1593 
1594 	hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
1595 
1596 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1597 			le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes));
1598 	fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4;
1599 
1600 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_LX6_IRAM_ADDR, 0);
1601 
1602 	for (i = 0; i < fw_size; i++) {
1603 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
1604 			msleep(1);
1605 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1606 			     regRLC_LX6_IRAM_DATA,
1607 			     le32_to_cpup(fw_data++));
1608 	}
1609 
1610 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1611 		     regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
1612 
1613 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1614 			le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes));
1615 	fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4;
1616 
1617 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1618 		     regRLC_LX6_DRAM_ADDR, 0);
1619 	for (i = 0; i < fw_size; i++) {
1620 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
1621 			msleep(1);
1622 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1623 			     regRLC_LX6_DRAM_DATA,
1624 			     le32_to_cpup(fw_data++));
1625 	}
1626 
1627 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1628 		     regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
1629 
1630 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_LX6_CNTL);
1631 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1);
1632 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0);
1633 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_LX6_CNTL, tmp);
1634 }
1635 
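/* Legacy (driver-side) RLC firmware loading for one XCC instance */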
1636 static int gfx_v12_1_xcc_rlc_load_microcode(struct amdgpu_device *adev,
1637 					    int xcc_id)
1638 {
1639 	const struct rlc_firmware_header_v2_0 *hdr;
1640 	uint16_t version_major;
1641 	uint16_t version_minor;
1642 
1643 	if (!adev->gfx.rlc_fw)
1644 		return -EINVAL;
1645 
1646 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1647 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
1648 
1649 	version_major = le16_to_cpu(hdr->header.header_version_major);
1650 	version_minor = le16_to_cpu(hdr->header.header_version_minor);
1651 
1652 	if (version_major == 2) {
1653 		gfx_v12_1_xcc_load_rlcg_microcode(adev, xcc_id);
1654 		if (amdgpu_dpm == 1) {
1655 			if (version_minor >= 2)
1656 				gfx_v12_1_xcc_load_rlc_iram_dram_microcode(adev, xcc_id);
1657 		}
1658 
1659 		return 0;
1660 	}
1661 
1662 	return -EINVAL;
1663 }
1664 
1665 static int gfx_v12_1_xcc_rlc_resume(struct amdgpu_device *adev,
1666 				    int xcc_id)
1667 {
1668 	int r;
1669 
1670 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1671 		gfx_v12_1_xcc_init_csb(adev, xcc_id);
1672 
1673 		if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
1674 			gfx_v12_1_xcc_rlc_enable_srm(adev, xcc_id);
1675 	} else {
1676 		if (amdgpu_sriov_vf(adev)) {
1677 			gfx_v12_1_xcc_init_csb(adev, xcc_id);
1678 			return 0;
1679 		}
1680 
1681 		gfx_v12_1_xcc_rlc_stop(adev, xcc_id);
1682 
1683 		/* disable CG */
1684 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
1685 
1686 		/* disable PG */
1687 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_PG_CNTL, 0);
1688 
1689 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1690 			/* legacy rlc firmware loading */
1691 			r = gfx_v12_1_xcc_rlc_load_microcode(adev, xcc_id);
1692 			if (r)
1693 				return r;
1694 		}
1695 
1696 		gfx_v12_1_xcc_init_csb(adev, xcc_id);
1697 
1698 		gfx_v12_1_xcc_rlc_start(adev, xcc_id);
1699 	}
1700 
1701 	return 0;
1702 }
1703 
1704 static int gfx_v12_1_rlc_resume(struct amdgpu_device *adev)
1705 {
1706 	int r, i, num_xcc;
1707 
1708 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1709 	for (i = 0; i < num_xcc; i++) {
1710 		r = gfx_v12_1_xcc_rlc_resume(adev, i);
1711 		if (r)
1712 			return r;
1713 	}
1714 
1715 	return 0;
1716 }
1717 
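/*
 * Program the MEC RS64 program counter start address for each pipe and
 * pulse the per-pipe reset bits in CP_MEC_RS64_CNTL.
 */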
1718 static void gfx_v12_1_xcc_config_gfx_rs64(struct amdgpu_device *adev,
1719 					  int xcc_id)
1720 {
1721 	const struct gfx_firmware_header_v2_0 *mec_hdr;
1722 	uint32_t pipe_id, tmp;
1723 
1724 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)
1725 		adev->gfx.mec_fw->data;
1726 
1727 	/* config mec program start addr */
1728 	for (pipe_id = 0; pipe_id < 4; pipe_id++) {
1729 		soc_v1_0_grbm_select(adev, 1, pipe_id, 0, 0, GET_INST(GC, xcc_id));
1730 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START,
1731 					mec_hdr->ucode_start_addr_lo >> 2 |
1732 					mec_hdr->ucode_start_addr_hi << 30);
1733 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START_HI,
1734 					mec_hdr->ucode_start_addr_hi >> 2);
1735 	}
1736 	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0));
1737 
1738 	/* reset mec pipe */
1739 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL);
1740 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
1741 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
1742 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
1743 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
1744 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL, tmp);
1745 
1746 	/* clear mec pipe reset */
1747 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
1748 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
1749 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
1750 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
1751 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL, tmp);
1752 }
1753 
1754 static void gfx_v12_1_config_gfx_rs64(struct amdgpu_device *adev)
1755 {
1756 	int i, num_xcc;
1757 
1758 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1759 
1760 	for (i = 0; i < num_xcc; i++)
1761 		gfx_v12_1_xcc_config_gfx_rs64(adev, i);
1762 }
1763 
1764 static void gfx_v12_1_xcc_set_mec_ucode_start_addr(struct amdgpu_device *adev,
1765 						   int xcc_id)
1766 {
1767 	const struct gfx_firmware_header_v2_0 *cp_hdr;
1768 	unsigned pipe_id;
1769 
1770 	cp_hdr = (const struct gfx_firmware_header_v2_0 *)
1771 		adev->gfx.mec_fw->data;
1772 	mutex_lock(&adev->srbm_mutex);
1773 	for (pipe_id = 0; pipe_id < adev->gfx.mec.num_pipe_per_mec; pipe_id++) {
1774 		soc_v1_0_grbm_select(adev, 1, pipe_id, 0, 0, GET_INST(GC, xcc_id));
1775 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START,
1776 			     cp_hdr->ucode_start_addr_lo >> 2 |
1777 			     cp_hdr->ucode_start_addr_hi << 30);
1778 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START_HI,
1779 			     cp_hdr->ucode_start_addr_hi >> 2);
1780 	}
1781 	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0));
1782 	mutex_unlock(&adev->srbm_mutex);
1783 }
1784 
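/*
 * Poll CP_STAT and RLC_RLCS_BOOTLOAD_STATUS until the RLC reports that GC
 * ucode autoload has completed; for the backdoor autoload path also program
 * the MEC ucode start address on each XCC.
 */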
1785 static int gfx_v12_1_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
1786 {
1787 	uint32_t cp_status;
1788 	uint32_t bootload_status;
1789 	int i, xcc_id;
1790 
1791 	for (i = 0; i < adev->usec_timeout; i++) {
1792 		cp_status = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_STAT);
1793 		bootload_status = RREG32_SOC15(GC, GET_INST(GC, 0),
1794 					       regRLC_RLCS_BOOTLOAD_STATUS);
1795 
1796 		if ((cp_status == 0) &&
1797 		    (REG_GET_FIELD(bootload_status,
1798 			RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
1799 			break;
1800 		}
1801 		udelay(1);
1802 		if (amdgpu_emu_mode)
1803 			msleep(10);
1804 	}
1805 
1806 	if (i >= adev->usec_timeout) {
1807 		dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
1808 		return -ETIMEDOUT;
1809 	}
1810 
1811 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1812 		for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++)
1813 			gfx_v12_1_xcc_set_mec_ucode_start_addr(adev, xcc_id);
1814 	}
1815 
1816 	return 0;
1817 }
1818 
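/* Take the MEC RS64 pipes out of reset and unhalt them, or halt and reset them */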
1819 static void gfx_v12_1_xcc_cp_compute_enable(struct amdgpu_device *adev,
1820 					    bool enable, int xcc_id)
1821 {
1822 	u32 data;
1823 
1824 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL);
1825 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE,
1826 						 enable ? 0 : 1);
1827 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET,
1828 						 enable ? 0 : 1);
1829 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET,
1830 						 enable ? 0 : 1);
1831 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET,
1832 						 enable ? 0 : 1);
1833 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET,
1834 						 enable ? 0 : 1);
1835 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE,
1836 						 enable ? 1 : 0);
1837 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE,
1838 						 enable ? 1 : 0);
1839 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE,
1840 						 enable ? 1 : 0);
1841 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE,
1842 						 enable ? 1 : 0);
1843 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT,
1844 						 enable ? 0 : 1);
1845 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL, data);
1846 
1847 	adev->gfx.kiq[xcc_id].ring.sched.ready = enable;
1848 
1849 	udelay(50);
1850 }
1851 
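/*
 * Copy the MEC RS64 instruction and data images into VRAM BOs, program the
 * per-pipe instruction/data cache base registers, invalidate the MEC caches
 * and set the ucode start address.
 */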
1852 static int gfx_v12_1_xcc_cp_compute_load_microcode_rs64(struct amdgpu_device *adev,
1853 							int xcc_id)
1854 {
1855 	const struct gfx_firmware_header_v2_0 *mec_hdr;
1856 	const __le32 *fw_ucode, *fw_data;
1857 	u32 tmp, fw_ucode_size, fw_data_size;
1858 	u32 i, usec_timeout = 50000; /* Wait for 50 ms */
1859 	u32 *fw_ucode_ptr, *fw_data_ptr;
1860 	int r;
1861 
1862 	if (!adev->gfx.mec_fw)
1863 		return -EINVAL;
1864 
1865 	gfx_v12_1_xcc_cp_compute_enable(adev, false, xcc_id);
1866 
1867 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
1868 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
1869 
1870 	fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data +
1871 				le32_to_cpu(mec_hdr->ucode_offset_bytes));
1872 	fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes);
1873 
1874 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1875 				le32_to_cpu(mec_hdr->data_offset_bytes));
1876 	fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
1877 
1878 	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
1879 				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
1880 				      &adev->gfx.mec.mec_fw_obj,
1881 				      &adev->gfx.mec.mec_fw_gpu_addr,
1882 				      (void **)&fw_ucode_ptr);
1883 	if (r) {
1884 		dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
1885 		gfx_v12_1_mec_fini(adev);
1886 		return r;
1887 	}
1888 
1889 	r = amdgpu_bo_create_reserved(adev,
1890 				      ALIGN(fw_data_size, 64 * 1024) *
1891 				      adev->gfx.mec.num_pipe_per_mec,
1892 				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
1893 				      &adev->gfx.mec.mec_fw_data_obj,
1894 				      &adev->gfx.mec.mec_fw_data_gpu_addr,
1895 				      (void **)&fw_data_ptr);
1896 	if (r) {
1897 		dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r);
1898 		gfx_v12_1_mec_fini(adev);
1899 		return r;
1900 	}
1901 
1902 	memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
1903 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
1904 		memcpy(fw_data_ptr + i * ALIGN(fw_data_size, 64 * 1024) / 4, fw_data, fw_data_size);
1905 	}
1906 
1907 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
1908 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
1909 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
1910 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);
1911 
1912 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL);
1913 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
1914 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
1915 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
1916 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp);
1917 
1918 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_BASE_CNTL);
1919 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
1920 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
1921 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_BASE_CNTL, tmp);
1922 
1923 	mutex_lock(&adev->srbm_mutex);
1924 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
1925 		soc_v1_0_grbm_select(adev, 1, i, 0, 0, GET_INST(GC, xcc_id));
1926 
1927 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_MDBASE_LO,
1928 			     lower_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr +
1929 					   i * ALIGN(fw_data_size, 64 * 1024)));
1930 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_MDBASE_HI,
1931 			     upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr +
1932 					   i * ALIGN(fw_data_size, 64 * 1024)));
1933 
1934 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO,
1935 			     lower_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
1936 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI,
1937 			     upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
1938 	}
1939 	mutex_unlock(&adev->srbm_mutex);
1940 	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0));
1941 
1942 	/* Trigger an invalidation of the MEC data cache */
1943 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_OP_CNTL);
1944 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
1945 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_OP_CNTL, tmp);
1946 
1947 	/* Wait for invalidation complete */
1948 	for (i = 0; i < usec_timeout; i++) {
1949 		tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_OP_CNTL);
1950 		if (REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
1951 				  INVALIDATE_DCACHE_COMPLETE) == 1)
1952 			break;
1953 		udelay(1);
1954 	}
1955 
1956 	if (i >= usec_timeout) {
1957 		dev_err(adev->dev, "failed to invalidate data cache\n");
1958 		return -EINVAL;
1959 	}
1960 
1961 	/* Trigger an invalidation of the L1 instruction caches */
1962 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_OP_CNTL);
1963 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
1964 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_OP_CNTL, tmp);
1965 
1966 	/* Wait for invalidation complete */
1967 	for (i = 0; i < usec_timeout; i++) {
1968 		tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_OP_CNTL);
1969 		if (REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
1970 				  INVALIDATE_CACHE_COMPLETE) == 1)
1971 			break;
1972 		udelay(1);
1973 	}
1974 
1975 	if (i >= usec_timeout) {
1976 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
1977 		return -EINVAL;
1978 	}
1979 
1980 	gfx_v12_1_xcc_set_mec_ucode_start_addr(adev, xcc_id);
1981 
1982 	return 0;
1983 }
1984 
1985 static void gfx_v12_1_xcc_kiq_setting(struct amdgpu_ring *ring,
1986 				      int xcc_id)
1987 {
1988 	uint32_t tmp;
1989 	struct amdgpu_device *adev = ring->adev;
1990 
1991 	/* tell RLC which queue is the KIQ */
1992 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
1993 	tmp &= 0xffffff00;
1994 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
1995 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
1996 	tmp |= 0x80;
1997 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
1998 }
1999 
2000 static void gfx_v12_1_xcc_cp_set_doorbell_range(struct amdgpu_device *adev,
2001 						int xcc_id)
2002 {
2003 	/* set graphics engine doorbell range */
2004 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_DOORBELL_RANGE_LOWER,
2005 		     (adev->doorbell_index.gfx_ring0 * 2) << 2);
2006 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_DOORBELL_RANGE_UPPER,
2007 		     (adev->doorbell_index.gfx_userqueue_end * 2) << 2);
2008 
2009 	/* set compute engine doorbell range */
2010 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_LOWER,
2011 		     (adev->doorbell_index.kiq * 2) << 2);
2012 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_UPPER,
2013 		     (adev->doorbell_index.userqueue_end * 2) << 2);
2014 }
2015 
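/* Fill a compute MQD with the queue, doorbell and EOP settings from the supplied mqd properties */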
2016 static int gfx_v12_1_compute_mqd_init(struct amdgpu_device *adev, void *m,
2017 				      struct amdgpu_mqd_prop *prop)
2018 {
2019 	struct v12_1_compute_mqd *mqd = m;
2020 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
2021 	uint32_t tmp;
2022 
2023 	mqd->header = 0xC0310800;
2024 	mqd->compute_pipelinestat_enable = 0x00000001;
2025 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2026 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2027 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2028 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2029 	mqd->compute_misc_reserved = 0x00000007;
2030 
2031 	eop_base_addr = prop->eop_gpu_addr >> 8;
2032 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2033 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
2034 
2035 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2036 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_EOP_CONTROL);
2037 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
2038 			(order_base_2(GFX12_MEC_HPD_SIZE / 4) - 1));
2039 
2040 	mqd->cp_hqd_eop_control = tmp;
2041 
2042 	/* enable doorbell? */
2043 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_DOORBELL_CONTROL);
2044 
2045 	if (prop->use_doorbell) {
2046 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2047 				    DOORBELL_OFFSET, prop->doorbell_index);
2048 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2049 				    DOORBELL_EN, 1);
2050 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2051 				    DOORBELL_SOURCE, 0);
2052 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2053 				    DOORBELL_HIT, 0);
2054 	} else {
2055 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2056 				    DOORBELL_EN, 0);
2057 	}
2058 
2059 	mqd->cp_hqd_pq_doorbell_control = tmp;
2060 
2061 	/* disable the queue if it's active */
2062 	mqd->cp_hqd_dequeue_request = 0;
2063 	mqd->cp_hqd_pq_rptr = 0;
2064 	mqd->cp_hqd_pq_wptr_lo = 0;
2065 	mqd->cp_hqd_pq_wptr_hi = 0;
2066 
2067 	/* set the pointer to the MQD */
2068 	mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc;
2069 	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
2070 
2071 	/* set MQD vmid to 0 */
2072 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_MQD_CONTROL);
2073 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
2074 	mqd->cp_mqd_control = tmp;
2075 
2076 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2077 	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
2078 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2079 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2080 
2081 	/* set up the HQD, this is similar to CP_RB0_CNTL */
2082 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_CONTROL);
2083 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
2084 			    (order_base_2(prop->queue_size / 4) - 1));
2085 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
2086 			    (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
2087 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
2088 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
2089 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
2090 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
2091 	mqd->cp_hqd_pq_control = tmp;
2092 
2093 	/* set the wb address whether it's enabled or not */
2094 	wb_gpu_addr = prop->rptr_gpu_addr;
2095 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2096 	mqd->cp_hqd_pq_rptr_report_addr_hi =
2097 		upper_32_bits(wb_gpu_addr) & 0xffff;
2098 
2099 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2100 	wb_gpu_addr = prop->wptr_gpu_addr;
2101 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2102 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2103 
2104 	tmp = 0;
2105 	/* enable the doorbell if requested */
2106 	if (prop->use_doorbell) {
2107 		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_DOORBELL_CONTROL);
2108 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2109 				DOORBELL_OFFSET, prop->doorbell_index);
2110 
2111 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2112 				    DOORBELL_EN, 1);
2113 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2114 				    DOORBELL_SOURCE, 0);
2115 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2116 				    DOORBELL_HIT, 0);
2117 	}
2118 
2119 	mqd->cp_hqd_pq_doorbell_control = tmp;
2120 
2121 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2122 	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_RPTR);
2123 
2124 	/* set the vmid for the queue */
2125 	mqd->cp_hqd_vmid = 0;
2126 
2127 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PERSISTENT_STATE);
2128 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x63);
2129 	mqd->cp_hqd_persistent_state = tmp;
2130 
2131 	/* set MIN_IB_AVAIL_SIZE */
2132 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_IB_CONTROL);
2133 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 1);
2134 	mqd->cp_hqd_ib_control = tmp;
2135 
2136 	/* set static priority for a compute queue/ring */
2137 	mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority;
2138 	mqd->cp_hqd_queue_priority = prop->hqd_queue_priority;
2139 
2140 	mqd->cp_hqd_active = prop->hqd_active;
2141 
2142 	return 0;
2143 }
2144 
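/* Program the KIQ HQD registers directly from the MQD contents */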
2145 static int gfx_v12_1_xcc_kiq_init_register(struct amdgpu_ring *ring,
2146 					   int xcc_id)
2147 {
2148 	struct amdgpu_device *adev = ring->adev;
2149 	struct v12_1_compute_mqd *mqd = ring->mqd_ptr;
2150 	int j;
2151 
2152 	/* inactivate the queue */
2153 	if (amdgpu_sriov_vf(adev))
2154 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
2155 
2156 	/* disable wptr polling */
2157 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
2158 
2159 	/* write the EOP addr */
2160 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
2161 	       mqd->cp_hqd_eop_base_addr_lo);
2162 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
2163 	       mqd->cp_hqd_eop_base_addr_hi);
2164 
2165 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2166 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
2167 	       mqd->cp_hqd_eop_control);
2168 
2169 	/* enable doorbell? */
2170 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
2171 	       mqd->cp_hqd_pq_doorbell_control);
2172 
2173 	/* disable the queue if it's active */
2174 	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
2175 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
2176 		for (j = 0; j < adev->usec_timeout; j++) {
2177 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
2178 				break;
2179 			udelay(1);
2180 		}
2181 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
2182 		       mqd->cp_hqd_dequeue_request);
2183 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
2184 		       mqd->cp_hqd_pq_rptr);
2185 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
2186 		       mqd->cp_hqd_pq_wptr_lo);
2187 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
2188 		       mqd->cp_hqd_pq_wptr_hi);
2189 	}
2190 
2191 	/* set the pointer to the MQD */
2192 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
2193 	       mqd->cp_mqd_base_addr_lo);
2194 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
2195 	       mqd->cp_mqd_base_addr_hi);
2196 
2197 	/* set MQD vmid to 0 */
2198 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
2199 	       mqd->cp_mqd_control);
2200 
2201 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2202 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
2203 	       mqd->cp_hqd_pq_base_lo);
2204 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
2205 	       mqd->cp_hqd_pq_base_hi);
2206 
2207 	/* set up the HQD, this is similar to CP_RB0_CNTL */
2208 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
2209 	       mqd->cp_hqd_pq_control);
2210 
2211 	/* set the wb address whether it's enabled or not */
2212 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR,
2213 		mqd->cp_hqd_pq_rptr_report_addr_lo);
2214 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
2215 		mqd->cp_hqd_pq_rptr_report_addr_hi);
2216 
2217 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2218 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR,
2219 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
2220 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
2221 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
2222 
2223 	/* enable the doorbell if requested */
2224 	if (ring->use_doorbell) {
2225 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_LOWER,
2226 			(adev->doorbell_index.kiq * 2) << 2);
2227 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_UPPER,
2228 			(adev->doorbell_index.userqueue_end * 2) << 2);
2229 	}
2230 
2231 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
2232 	       mqd->cp_hqd_pq_doorbell_control);
2233 
2234 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2235 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
2236 	       mqd->cp_hqd_pq_wptr_lo);
2237 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
2238 	       mqd->cp_hqd_pq_wptr_hi);
2239 
2240 	/* set the vmid for the queue */
2241 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid);
2242 
2243 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE,
2244 	       mqd->cp_hqd_persistent_state);
2245 
2246 	/* activate the queue */
2247 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE,
2248 	       mqd->cp_hqd_active);
2249 
2250 	if (ring->use_doorbell)
2251 		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2252 
2253 	return 0;
2254 }
2255 
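/*
 * Initialize the KIQ MQD, or restore it from the backup copy when recovering
 * from a GPU reset, and program the HQD registers under the SRBM mutex.
 */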
2256 static int gfx_v12_1_xcc_kiq_init_queue(struct amdgpu_ring *ring,
2257 					int xcc_id)
2258 {
2259 	struct amdgpu_device *adev = ring->adev;
2260 	struct v12_1_compute_mqd *mqd = ring->mqd_ptr;
2261 
2262 	gfx_v12_1_xcc_kiq_setting(ring, xcc_id);
2263 
2264 	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
2265 		/* reset MQD to a clean status */
2266 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2267 			memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(*mqd));
2268 
2269 		/* reset ring buffer */
2270 		ring->wptr = 0;
2271 		amdgpu_ring_clear_ring(ring);
2272 
2273 		mutex_lock(&adev->srbm_mutex);
2274 		soc_v1_0_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2275 		gfx_v12_1_xcc_kiq_init_register(ring, xcc_id);
2276 		soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2277 		mutex_unlock(&adev->srbm_mutex);
2278 	} else {
2279 		memset((void *)mqd, 0, sizeof(*mqd));
2280 		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
2281 			amdgpu_ring_clear_ring(ring);
2282 		mutex_lock(&adev->srbm_mutex);
2283 		soc_v1_0_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2284 		amdgpu_ring_init_mqd(ring);
2285 		gfx_v12_1_xcc_kiq_init_register(ring, xcc_id);
2286 		soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2287 		mutex_unlock(&adev->srbm_mutex);
2288 
2289 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2290 			memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(*mqd));
2291 	}
2292 
2293 	return 0;
2294 }
2295 
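/* Initialize a KCQ MQD, or restore it from the backup copy on reset/resume */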
2296 static int gfx_v12_1_xcc_kcq_init_queue(struct amdgpu_ring *ring,
2297 					int xcc_id)
2298 {
2299 	struct amdgpu_device *adev = ring->adev;
2300 	struct v12_1_compute_mqd *mqd = ring->mqd_ptr;
2301 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
2302 
2303 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2304 		memset((void *)mqd, 0, sizeof(*mqd));
2305 		mutex_lock(&adev->srbm_mutex);
2306 		soc_v1_0_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2307 		amdgpu_ring_init_mqd(ring);
2308 		soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2309 		mutex_unlock(&adev->srbm_mutex);
2310 
2311 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2312 			memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
2313 	} else {
2314 		/* restore MQD to a clean status */
2315 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2316 			memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
2317 		/* reset ring buffer */
2318 		ring->wptr = 0;
2319 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
2320 		amdgpu_ring_clear_ring(ring);
2321 	}
2322 
2323 	return 0;
2324 }
2325 
2326 static int gfx_v12_1_xcc_kiq_resume(struct amdgpu_device *adev,
2327 				    int xcc_id)
2328 {
2329 	struct amdgpu_ring *ring;
2330 	int r;
2331 
2332 	ring = &adev->gfx.kiq[xcc_id].ring;
2333 
2334 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
2335 	if (unlikely(r != 0))
2336 		return r;
2337 
2338 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2339 	if (unlikely(r != 0)) {
2340 		amdgpu_bo_unreserve(ring->mqd_obj);
2341 		return r;
2342 	}
2343 
2344 	gfx_v12_1_xcc_kiq_init_queue(ring, xcc_id);
2345 	amdgpu_bo_kunmap(ring->mqd_obj);
2346 	ring->mqd_ptr = NULL;
2347 	amdgpu_bo_unreserve(ring->mqd_obj);
2348 	ring->sched.ready = true;
2349 	return 0;
2350 }
2351 
2352 static int gfx_v12_1_xcc_kcq_resume(struct amdgpu_device *adev,
2353 				    int xcc_id)
2354 {
2355 	struct amdgpu_ring *ring = NULL;
2356 	int r = 0, i;
2357 
2358 	if (!amdgpu_async_gfx_ring)
2359 		gfx_v12_1_xcc_cp_compute_enable(adev, true, xcc_id);
2360 
2361 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2362 		ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
2363 
2364 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
2365 		if (unlikely(r != 0))
2366 			goto done;
2367 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2368 		if (!r) {
2369 			r = gfx_v12_1_xcc_kcq_init_queue(ring, xcc_id);
2370 			amdgpu_bo_kunmap(ring->mqd_obj);
2371 			ring->mqd_ptr = NULL;
2372 		}
2373 		amdgpu_bo_unreserve(ring->mqd_obj);
2374 		if (r)
2375 			goto done;
2376 	}
2377 
2378 	r = amdgpu_gfx_enable_kcq(adev, xcc_id);
2379 done:
2380 	return r;
2381 }
2382 
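/*
 * Bring up the compute CP on one XCC: load MEC ucode for direct loading, set
 * the doorbell ranges, resume the KIQ and KCQs and test each compute ring.
 */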
2383 static int gfx_v12_1_xcc_cp_resume(struct amdgpu_device *adev,
2384 				   int xcc_id)
2385 {
2386 	int r, i;
2387 	struct amdgpu_ring *ring;
2388 
2389 	if (!(adev->flags & AMD_IS_APU))
2390 		gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
2391 
2392 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
2393 		/* legacy firmware loading */
2394 		r = gfx_v12_1_xcc_cp_compute_load_microcode_rs64(adev, xcc_id);
2395 		if (r)
2396 			return r;
2397 	}
2398 
2399 	gfx_v12_1_xcc_cp_set_doorbell_range(adev, xcc_id);
2400 
2401 	if (amdgpu_async_gfx_ring)
2402 		gfx_v12_1_xcc_cp_compute_enable(adev, true, xcc_id);
2404 
2405 	if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
2406 		r = amdgpu_mes_kiq_hw_init(adev, xcc_id);
2407 	else
2408 		r = gfx_v12_1_xcc_kiq_resume(adev, xcc_id);
2409 	if (r)
2410 		return r;
2411 
2412 	r = gfx_v12_1_xcc_kcq_resume(adev, xcc_id);
2413 	if (r)
2414 		return r;
2415 
2416 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2417 		ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
2418 		r = amdgpu_ring_test_helper(ring);
2419 		if (r)
2420 			return r;
2421 	}
2422 
2423 	return 0;
2424 }
2425 
2426 static int gfx_v12_1_cp_resume(struct amdgpu_device *adev)
2427 {
2428 	int i, r, num_xcc;
2429 
2430 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2431 
2432 	for (i = 0; i < num_xcc; i++) {
2433 		r = gfx_v12_1_xcc_cp_resume(adev, i);
2434 		if (r)
2435 			return r;
2436 	}
2437 
2438 	return 0;
2439 }
2440 
2441 static int gfx_v12_1_gfxhub_enable(struct amdgpu_device *adev)
2442 {
2443 	int r;
2444 	bool value;
2445 
2446 	r = adev->gfxhub.funcs->gart_enable(adev);
2447 	if (r)
2448 		return r;
2449 
2450 	value = (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS);
2452 
2453 	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
2454 	/* TODO: investigate why a TLB flush is needed here,
2455 	 * are we missing a flush somewhere else? */
2456 	adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
2457 
2458 	return 0;
2459 }
2460 
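/* Read GB_ADDR_CONFIG_READ and decode it into the cached gb_addr_config fields */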
2461 static int get_gb_addr_config(struct amdgpu_device *adev)
2462 {
2463 	u32 gb_addr_config;
2464 
2465 	gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG_READ);
2466 	if (gb_addr_config == 0)
2467 		return -EINVAL;
2468 
2469 	adev->gfx.config.gb_addr_config_fields.num_pkrs =
2470 		1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG_READ, NUM_PKRS);
2471 
2472 	adev->gfx.config.gb_addr_config = gb_addr_config;
2473 
2474 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
2475 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2476 				      GB_ADDR_CONFIG_READ, NUM_PIPES);
2477 
2478 	adev->gfx.config.max_tile_pipes =
2479 		adev->gfx.config.gb_addr_config_fields.num_pipes;
2480 
2481 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
2482 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2483 				      GB_ADDR_CONFIG_READ, MAX_COMPRESSED_FRAGS);
2484 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
2485 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2486 				      GB_ADDR_CONFIG_READ, NUM_RB_PER_SE);
2487 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
2488 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2489 				      GB_ADDR_CONFIG_READ, NUM_SHADER_ENGINES);
2490 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
2491 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2492 				      GB_ADDR_CONFIG_READ, PIPE_INTERLEAVE_SIZE));
2493 
2494 	return 0;
2495 }
2496 
2497 static void gfx_v12_1_xcc_disable_gpa_mode(struct amdgpu_device *adev,
2498 					   int xcc_id)
2499 {
2500 	uint32_t data;
2501 
2502 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
2503 	data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
2504 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
2505 
2506 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPG_PSP_DEBUG);
2507 	data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK;
2508 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPG_PSP_DEBUG, data);
2509 }
2510 
2511 static void gfx_v12_1_init_golden_registers(struct amdgpu_device *adev)
2512 {
2513 	uint32_t val;
2514 
2515 	/* Setup the TCP Thrashing control register */
2516 	val = RREG32_SOC15(GC, 0, regTCP_UTCL0_THRASHING_CTRL);
2517 
2518 	val = REG_SET_FIELD(val, TCP_UTCL0_THRASHING_CTRL, THRASHING_EN, 0x2);
2519 	val = REG_SET_FIELD(val, TCP_UTCL0_THRASHING_CTRL,
2520 				RETRY_FRAGMENT_THRESHOLD_DOWN_EN, 0x0);
2521 	val = REG_SET_FIELD(val, TCP_UTCL0_THRASHING_CTRL,
2522 				RETRY_FRAGMENT_THRESHOLD_UP_EN, 0x0);
2523 
2524 	WREG32_SOC15(GC, 0, regTCP_UTCL0_THRASHING_CTRL, val);
2525 
2526 	/* Set the TCP UTCL0 register to enable atomics */
2527 	val = RREG32_SOC15(GC, 0, regTCP_UTCL0_CNTL1);
2528 	val = REG_SET_FIELD(val, TCP_UTCL0_CNTL1, ATOMIC_REQUESTER_EN, 0x1);
2529 
2530 	WREG32_SOC15(GC, 0, regTCP_UTCL0_CNTL1, val);
2531 }
2532 
2533 static int gfx_v12_1_hw_init(struct amdgpu_ip_block *ip_block)
2534 {
2535 	int r, i, num_xcc;
2536 	struct amdgpu_device *adev = ip_block->adev;
2537 
2538 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
2539 		if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
2540 			/* RLC autoload sequence 1: Program rlc ram */
2541 			if (adev->gfx.imu.funcs->program_rlc_ram)
2542 				adev->gfx.imu.funcs->program_rlc_ram(adev);
2543 		}
2544 		/* rlc autoload firmware */
2545 		r = gfx_v12_1_rlc_backdoor_autoload_enable(adev);
2546 		if (r)
2547 			return r;
2548 	} else {
2549 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
2550 			num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2551 
2552 			for (i = 0; i < num_xcc; i++) {
2553 				if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
2554 					if (adev->gfx.imu.funcs->load_microcode)
2555 						adev->gfx.imu.funcs->load_microcode(adev);
2556 					if (adev->gfx.imu.funcs->setup_imu)
2557 						adev->gfx.imu.funcs->setup_imu(adev);
2558 					if (adev->gfx.imu.funcs->start_imu)
2559 						adev->gfx.imu.funcs->start_imu(adev);
2560 				}
2561 
2562 				/* disable gpa mode in backdoor loading */
2563 				gfx_v12_1_xcc_disable_gpa_mode(adev, i);
2564 			}
2565 		}
2566 	}
2567 
2568 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
2569 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2570 		r = gfx_v12_1_wait_for_rlc_autoload_complete(adev);
2571 		if (r) {
2572 			dev_err(adev->dev, "(%d) failed to wait for rlc autoload to complete\n", r);
2573 			return r;
2574 		}
2575 	}
2576 
2577 	adev->gfx.is_poweron = true;
2578 
2579 	if (get_gb_addr_config(adev))
2580 		DRM_WARN("Invalid gb_addr_config!\n");
2581 
2582 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2583 		gfx_v12_1_config_gfx_rs64(adev);
2584 
2585 	r = gfx_v12_1_gfxhub_enable(adev);
2586 	if (r)
2587 		return r;
2588 
2589 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT ||
2590 	     adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) &&
2591 	     (amdgpu_dpm == 1)) {
2592 		/*
2593 		 * For gfx 12, RLC firmware loading relies on the SMU firmware
2594 		 * being loaded first, so for the direct loading type the SMC
2595 		 * ucode has to be loaded here before the RLC.
2596 		 */
2597 		if (!(adev->flags & AMD_IS_APU)) {
2598 			r = amdgpu_pm_load_smu_firmware(adev, NULL);
2599 			if (r)
2600 				return r;
2601 		}
2602 	}
2603 
2604 	gfx_v12_1_init_golden_registers(adev);
2605 
2606 	gfx_v12_1_constants_init(adev);
2607 
2608 	if (adev->nbio.funcs->gc_doorbell_init)
2609 		adev->nbio.funcs->gc_doorbell_init(adev);
2610 
2611 	r = gfx_v12_1_rlc_resume(adev);
2612 	if (r)
2613 		return r;
2614 
2615 	/*
2616 	 * golden register init and rlc resume may have overridden some
2617 	 * registers, so reconfigure them here
2618 	 */
2619 	gfx_v12_1_tcp_harvest(adev);
2620 
2621 	r = gfx_v12_1_cp_resume(adev);
2622 	if (r)
2623 		return r;
2624 
2625 	return 0;
2626 }
2627 
2628 static void gfx_v12_1_xcc_fini(struct amdgpu_device *adev,
2629 			      int xcc_id)
2630 {
2631 	uint32_t tmp;
2632 
2633 	if (!adev->no_hw_access) {
2634 		if (amdgpu_gfx_disable_kcq(adev, xcc_id))
2635 			DRM_ERROR("KCQ disable failed\n");
2636 
2637 		amdgpu_mes_kiq_hw_fini(adev, xcc_id);
2638 	}
2639 
2640 	if (amdgpu_sriov_vf(adev)) {
2641 		/* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
2642 		tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
2643 		tmp &= 0xffffff00;
2644 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
2645 	}
2646 	gfx_v12_1_xcc_cp_compute_enable(adev, false, xcc_id);
2647 	gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
2648 }
2649 
2650 static int gfx_v12_1_hw_fini(struct amdgpu_ip_block *ip_block)
2651 {
2652 	struct amdgpu_device *adev = ip_block->adev;
2653 	int i, num_xcc;
2654 
2655 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
2656 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
2657 
2658 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2659 	for (i = 0; i < num_xcc; i++)
2660 		gfx_v12_1_xcc_fini(adev, i);
2662 
2663 	adev->gfxhub.funcs->gart_disable(adev);
2664 
2665 	adev->gfx.is_poweron = false;
2666 
2667 	return 0;
2668 }
2669 
2670 static int gfx_v12_1_suspend(struct amdgpu_ip_block *ip_block)
2671 {
2672 	return gfx_v12_1_hw_fini(ip_block);
2673 }
2674 
2675 static int gfx_v12_1_resume(struct amdgpu_ip_block *ip_block)
2676 {
2677 	return gfx_v12_1_hw_init(ip_block);
2678 }
2679 
2680 static bool gfx_v12_1_is_idle(struct amdgpu_ip_block *ip_block)
2681 {
2682 	struct amdgpu_device *adev = ip_block->adev;
2683 	int i, num_xcc;
2684 
2685 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2686 	for (i = 0; i < num_xcc; i++) {
2687 		if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i),
2688 				regGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE))
2689 			return false;
2690 	}
2691 	return true;
2692 }
2693 
2694 static int gfx_v12_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
2695 {
2696 	unsigned i;
2697 	struct amdgpu_device *adev = ip_block->adev;
2698 
2699 	for (i = 0; i < adev->usec_timeout; i++) {
2700 		if (gfx_v12_1_is_idle(ip_block))
2701 			return 0;
2702 		udelay(1);
2703 	}
2704 	return -ETIMEDOUT;
2705 }
2706 
2707 static uint64_t gfx_v12_1_get_gpu_clock_counter(struct amdgpu_device *adev)
2708 {
2709 	uint64_t clock = 0;
2710 
2711 	if (adev->smuio.funcs &&
2712 	    adev->smuio.funcs->get_gpu_clock_counter)
2713 		clock = adev->smuio.funcs->get_gpu_clock_counter(adev);
2714 	else
2715 		dev_warn(adev->dev, "query gpu clock counter is not supported\n");
2716 
2717 	return clock;
2718 }
2719 
2720 static int gfx_v12_1_early_init(struct amdgpu_ip_block *ip_block)
2721 {
2722 	struct amdgpu_device *adev = ip_block->adev;
2723 
2724 	adev->gfx.funcs = &gfx_v12_1_gfx_funcs;
2725 
2726 	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
2727 					  AMDGPU_MAX_COMPUTE_RINGS);
2728 
2729 	gfx_v12_1_set_kiq_pm4_funcs(adev);
2730 	gfx_v12_1_set_ring_funcs(adev);
2731 	gfx_v12_1_set_irq_funcs(adev);
2732 	gfx_v12_1_set_rlc_funcs(adev);
2733 	gfx_v12_1_set_mqd_funcs(adev);
2734 	gfx_v12_1_set_imu_funcs(adev);
2735 
2736 	gfx_v12_1_init_rlcg_reg_access_ctrl(adev);
2737 
2738 	return gfx_v12_1_init_microcode(adev);
2739 }
2740 
2741 static int gfx_v12_1_late_init(struct amdgpu_ip_block *ip_block)
2742 {
2743 	struct amdgpu_device *adev = ip_block->adev;
2744 	int r;
2745 
2746 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
2747 	if (r)
2748 		return r;
2749 
2750 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
2751 	if (r)
2752 		return r;
2753 
2754 	return 0;
2755 }
2756 
2757 static bool gfx_v12_1_is_rlc_enabled(struct amdgpu_device *adev)
2758 {
2759 	uint32_t rlc_cntl;
2760 
2761 	/* if RLC is not enabled, do nothing */
2762 	rlc_cntl = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
2763 	return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false;
2764 }
2765 
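/* Request RLC safe mode and poll until the CMD field is acknowledged (cleared) */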
2766 static void gfx_v12_1_xcc_set_safe_mode(struct amdgpu_device *adev,
2767 					int xcc_id)
2768 {
2769 	uint32_t data;
2770 	unsigned i;
2771 
2772 	data = RLC_SAFE_MODE__CMD_MASK;
2773 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
2774 
2775 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
2776 
2777 	/* wait for RLC_SAFE_MODE */
2778 	for (i = 0; i < adev->usec_timeout; i++) {
2779 		if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2780 						regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
2781 			break;
2782 		udelay(1);
2783 	}
2784 }
2785 
2786 static void gfx_v12_1_xcc_unset_safe_mode(struct amdgpu_device *adev,
2787 					  int xcc_id)
2788 {
2789 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2790 		     regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK);
2791 }
2792 
2793 static void gfx_v12_1_update_perf_clk(struct amdgpu_device *adev,
2794 				      bool enable)
2795 {
2796 	int i, num_xcc;
2797 
2798 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2799 	for (i = 0; i < num_xcc; i++)
2800 		gfx_v12_1_xcc_update_perf_clk(adev, enable, i);
2801 }
2802 
2803 static void gfx_v12_1_update_spm_vmid(struct amdgpu_device *adev,
2804 				      int xcc_id,
2805 				      struct amdgpu_ring *ring,
2806 				      unsigned vmid)
2807 {
2808 	u32 reg, data;
2809 
2810 	reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL);
2811 	if (amdgpu_sriov_is_pp_one_vf(adev))
2812 		data = RREG32_NO_KIQ(reg);
2813 	else
2814 		data = RREG32(reg);
2815 
2816 	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
2817 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
2818 
2819 	if (amdgpu_sriov_is_pp_one_vf(adev))
2820 		WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL, data);
2821 	else
2822 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL, data);
2823 
2824 	if (ring
2825 	    && amdgpu_sriov_is_pp_one_vf(adev)
2826 	    && ((ring->funcs->type == AMDGPU_RING_TYPE_GFX)
2827 		|| (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE))) {
2828 		uint32_t reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL);
2829 		amdgpu_ring_emit_wreg(ring, reg, data);
2830 	}
2831 }
2832 
2833 static const struct amdgpu_rlc_funcs gfx_v12_1_rlc_funcs = {
2834 	.is_rlc_enabled = gfx_v12_1_is_rlc_enabled,
2835 	.set_safe_mode = gfx_v12_1_xcc_set_safe_mode,
2836 	.unset_safe_mode = gfx_v12_1_xcc_unset_safe_mode,
2837 	.init = gfx_v12_1_rlc_init,
2838 	.get_csb_size = gfx_v12_1_get_csb_size,
2839 	.get_csb_buffer = gfx_v12_1_get_csb_buffer,
2840 	.resume = gfx_v12_1_rlc_resume,
2841 	.stop = gfx_v12_1_rlc_stop,
2842 	.reset = gfx_v12_1_rlc_reset,
2843 	.start = gfx_v12_1_rlc_start,
2844 	.update_spm_vmid = gfx_v12_1_update_spm_vmid,
2845 };
2846 
2847 #if 0
2848 static void gfx_v12_cntl_power_gating(struct amdgpu_device *adev, bool enable)
2849 {
2850 	/* TODO */
2851 }
2852 
2853 static void gfx_v12_cntl_pg(struct amdgpu_device *adev, bool enable)
2854 {
2855 	/* TODO */
2856 }
2857 #endif
2858 
2859 static int gfx_v12_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
2860 					   enum amd_powergating_state state)
2861 {
2862 	struct amdgpu_device *adev = ip_block->adev;
2863 	bool enable = (state == AMD_PG_STATE_GATE);
2864 
2865 	if (amdgpu_sriov_vf(adev))
2866 		return 0;
2867 
2868 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2869 	case IP_VERSION(12, 1, 0):
2870 		amdgpu_gfx_off_ctrl(adev, enable);
2871 		break;
2872 	default:
2873 		break;
2874 	}
2875 
2876 	return 0;
2877 }
2878 
2879 static void gfx_v12_1_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
2880 							   bool enable, int xcc_id)
2881 {
2882 	uint32_t def, data;
2883 
2884 	if (!(adev->cg_flags &
2885 	      (AMD_CG_SUPPORT_GFX_CGCG |
2886 	      AMD_CG_SUPPORT_GFX_CGLS |
2887 	      AMD_CG_SUPPORT_GFX_3D_CGCG |
2888 	      AMD_CG_SUPPORT_GFX_3D_CGLS)))
2889 		return;
2890 
2891 	if (enable) {
2892 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2893 					  regRLC_CGTT_MGCG_OVERRIDE);
2894 
2895 		/* unset CGCG override */
2896 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
2897 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
2898 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2899 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2900 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG ||
2901 		    adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
2902 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
2903 
2904 		/* update CGCG override bits */
2905 		if (def != data)
2906 			WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2907 				     regRLC_CGTT_MGCG_OVERRIDE, data);
2908 
2909 		/* enable cgcg FSM(0x0000363F) */
2910 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2911 
2912 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
2913 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK;
2914 			data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
2915 				 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
2916 		}
2917 
2918 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
2919 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK;
2920 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
2921 				 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
2922 		}
2923 
2924 		if (def != data)
2925 			WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2926 				     regRLC_CGCG_CGLS_CTRL, data);
2927 
2928 		/* set IDLE_POLL_COUNT(0x00900100) */
2929 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
2930 
2931 		data &= ~CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK;
2932 		data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2933 		data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
2934 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2935 
2936 		if (def != data)
2937 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
2938 
2939 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL);
2940 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
2941 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
2942 		data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
2943 		data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
2944 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL, data);
2945 	} else {
2946 		/* Program RLC_CGCG_CGLS_CTRL */
2947 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2948 
2949 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
2950 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
2951 
2952 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2953 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
2954 
2955 		if (def != data)
2956 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2957 	}
2958 }
2959 
2960 static void gfx_v12_1_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev,
2961 							   bool enable, int xcc_id)
2962 {
2963 	uint32_t data, def;
2964 	if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)))
2965 		return;
2966 
2967 	/* It is disabled by HW by default */
2968 	if (enable) {
2969 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
2970 			/* 1 - RLC_CGTT_MGCG_OVERRIDE */
2971 			def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2972 
2973 			data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2974 				  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2975 				  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
2976 
2977 			if (def != data)
2978 				WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2979 		}
2980 	} else {
2981 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
2982 			def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2983 
2984 			data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2985 				 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2986 				 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
2987 
2988 			if (def != data)
2989 				WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2990 		}
2991 	}
2992 }
2993 
2994 static void gfx_v12_1_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
2995 					       bool enable, int xcc_id)
2996 {
2997 	uint32_t def, data;
2998 
2999 	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
3000 		return;
3001 
3002 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3003 
3004 	if (enable)
3005 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK |
3006 				  RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK);
3007 	else
3008 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK |
3009 				RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK;
3010 
3011 	if (def != data)
3012 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3013 }
3014 
3015 static void gfx_v12_1_xcc_update_sram_fgcg(struct amdgpu_device *adev,
3016 					   bool enable, int xcc_id)
3017 {
3018 	uint32_t def, data;
3019 
3020 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
3021 		return;
3022 
3023 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3024 
3025 	if (enable)
3026 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
3027 	else
3028 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
3029 
3030 	if (def != data)
3031 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3032 }
3033 
3034 static void gfx_v12_1_xcc_update_perf_clk(struct amdgpu_device *adev,
3035 					  bool enable, int xcc_id)
3036 {
3037 	uint32_t def, data;
3038 
3039 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK))
3040 		return;
3041 
3042 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3043 
3044 	if (enable)
3045 		data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
3046 	else
3047 		data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
3048 
3049 	if (def != data)
3050 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3051 }
3052 
3053 static int gfx_v12_1_xcc_update_gfx_clock_gating(struct amdgpu_device *adev,
3054 					     bool enable, int xcc_id)
3055 {
3056 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
3057 
3058 	gfx_v12_1_xcc_update_coarse_grain_clock_gating(adev, enable, xcc_id);
3059 
3060 	gfx_v12_1_xcc_update_medium_grain_clock_gating(adev, enable, xcc_id);
3061 
3062 	gfx_v12_1_xcc_update_repeater_fgcg(adev, enable, xcc_id);
3063 
3064 	gfx_v12_1_xcc_update_sram_fgcg(adev, enable, xcc_id);
3065 
3066 	gfx_v12_1_xcc_update_perf_clk(adev, enable, xcc_id);
3067 
3068 	if (adev->cg_flags &
3069 	    (AMD_CG_SUPPORT_GFX_MGCG |
3070 	     AMD_CG_SUPPORT_GFX_CGLS |
3071 	     AMD_CG_SUPPORT_GFX_CGCG |
3072 	     AMD_CG_SUPPORT_GFX_3D_CGCG |
3073 	     AMD_CG_SUPPORT_GFX_3D_CGLS))
3074 		gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, enable, xcc_id);
3075 
3076 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
3077 
3078 	return 0;
3079 }
3080 
3081 static int gfx_v12_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
3082 					   enum amd_clockgating_state state)
3083 {
3084 	struct amdgpu_device *adev = ip_block->adev;
3085 	int i, num_xcc;
3086 
3087 	if (amdgpu_sriov_vf(adev))
3088 		return 0;
3089 
3090 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3091 	switch (adev->ip_versions[GC_HWIP][0]) {
3092 	case IP_VERSION(12, 1, 0):
3093 		for (i = 0; i < num_xcc; i++)
3094 			gfx_v12_1_xcc_update_gfx_clock_gating(adev,
3095 				  state == AMD_CG_STATE_GATE, i);
3096 		break;
3097 	default:
3098 		break;
3099 	}
3100 
3101 	return 0;
3102 }
3103 
3104 static void gfx_v12_1_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
3105 {
3106 	struct amdgpu_device *adev = ip_block->adev;
3107 	int data;
3108 
3109 	/* AMD_CG_SUPPORT_GFX_MGCG */
3110 	data = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE);
3111 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
3112 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
3113 
3114 	/* AMD_CG_SUPPORT_REPEATER_FGCG */
3115 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK))
3116 		*flags |= AMD_CG_SUPPORT_REPEATER_FGCG;
3117 
3118 	/* AMD_CG_SUPPORT_GFX_FGCG */
3119 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK))
3120 		*flags |= AMD_CG_SUPPORT_GFX_FGCG;
3121 
3122 	/* AMD_CG_SUPPORT_GFX_PERF_CLK */
3123 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK))
3124 		*flags |= AMD_CG_SUPPORT_GFX_PERF_CLK;
3125 
3126 	/* AMD_CG_SUPPORT_GFX_CGCG */
3127 	data = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL);
3128 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
3129 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
3130 
3131 	/* AMD_CG_SUPPORT_GFX_CGLS */
3132 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
3133 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
3134 }
3135 
3136 static u64 gfx_v12_1_ring_get_rptr_compute(struct amdgpu_ring *ring)
3137 {
3138 	/* gfx12 hardware uses a 32-bit rptr */
3139 	return *(uint32_t *)ring->rptr_cpu_addr;
3140 }
3141 
3142 static u64 gfx_v12_1_ring_get_wptr_compute(struct amdgpu_ring *ring)
3143 {
3144 	u64 wptr;
3145 
3146 	/* XXX check if swapping is necessary on BE */
3147 	if (ring->use_doorbell)
3148 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
3149 	else
3150 		BUG();
3151 	return wptr;
3152 }
3153 
3154 static void gfx_v12_1_ring_set_wptr_compute(struct amdgpu_ring *ring)
3155 {
3156 	struct amdgpu_device *adev = ring->adev;
3157 
3158 	/* XXX check if swapping is necessary on BE */
3159 	if (ring->use_doorbell) {
3160 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
3161 			     ring->wptr);
3162 		WDOORBELL64(ring->doorbell_index, ring->wptr);
3163 	} else {
3164 		BUG(); /* only DOORBELL method supported on gfx12 now */
3165 	}
3166 }
3167 
3168 static void gfx_v12_1_ring_emit_ib_compute(struct amdgpu_ring *ring,
3169 					   struct amdgpu_job *job,
3170 					   struct amdgpu_ib *ib,
3171 					   uint32_t flags)
3172 {
3173 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
3174 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
3175 
3176 	/* Currently, there is a high possibility to get wave ID mismatch
3177 	 * between ME and GDS, leading to a hw deadlock, because ME generates
3178 	 * different wave IDs than the GDS expects. This situation happens
3179 	 * randomly when at least 5 compute pipes use GDS ordered append.
3180 	 * The wave IDs generated by ME are also wrong after suspend/resume.
3181 	 * Those are probably bugs somewhere else in the kernel driver.
3182 	 *
3183 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
3184 	 * GDS to 0 for this ring (me/pipe).
3185 	 */
3186 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
3187 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3188 		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
3189 	}
3190 
3191 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3192 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3193 	amdgpu_ring_write(ring,
3194 #ifdef __BIG_ENDIAN
3195 				(2 << 0) |
3196 #endif
3197 				lower_32_bits(ib->gpu_addr));
3198 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3199 	amdgpu_ring_write(ring, control);
3200 }
3201 
3202 static void gfx_v12_1_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
3203 				     u64 seq, unsigned flags)
3204 {
3205 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
3206 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
3207 
3208 	/* RELEASE_MEM - flush caches, send int */
3209 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
3210 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ(1) |
3211 				 PACKET3_RELEASE_MEM_GCR_GLV_WB |
3212 				 PACKET3_RELEASE_MEM_GCR_GL2_WB |
3213 				 PACKET3_RELEASE_MEM_GCR_GL2_SCOPE(2) |
3214 				 PACKET3_RELEASE_MEM_TEMPORAL(3) |
3215 				 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
3216 				 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
3217 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
3218 				 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));
3219 
3220 	/*
3221 	 * The address must be QWord-aligned for a 64-bit write and
3222 	 * DWord-aligned if only the low 32 bits are sent (data high discarded).
3223 	 */
3224 	if (write64bit)
3225 		BUG_ON(addr & 0x7);
3226 	else
3227 		BUG_ON(addr & 0x3);
3228 	amdgpu_ring_write(ring, lower_32_bits(addr));
3229 	amdgpu_ring_write(ring, upper_32_bits(addr));
3230 	amdgpu_ring_write(ring, lower_32_bits(seq));
3231 	amdgpu_ring_write(ring, upper_32_bits(seq));
3232 	amdgpu_ring_write(ring, 0);
3233 }
3234 
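/*
 * Emit a pipeline sync: stall the PFP (gfx) or ME (compute) with a
 * WAIT_REG_MEM on the ring's fence address until the latest emitted
 * fence sequence number has been written back to memory.
 */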
3235 static void gfx_v12_1_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
3236 {
3237 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3238 	uint32_t seq = ring->fence_drv.sync_seq;
3239 	uint64_t addr = ring->fence_drv.gpu_addr;
3240 
3241 	gfx_v12_1_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
3242 			       upper_32_bits(addr), seq, 0xffffffff, 4);
3243 }
3244 
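/* Ask the CP to invalidate the hub TLB entries for a PASID via INVALIDATE_TLBS. */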
3245 static void gfx_v12_1_ring_invalidate_tlbs(struct amdgpu_ring *ring,
3246 				   uint16_t pasid, uint32_t flush_type,
3247 				   bool all_hub, uint8_t dst_sel)
3248 {
3249 	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
3250 	amdgpu_ring_write(ring,
3251 			  PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) |
3252 			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
3253 			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
3254 			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
3255 }
3256 
3257 static void gfx_v12_1_ring_emit_vm_flush(struct amdgpu_ring *ring,
3258 					 unsigned vmid, uint64_t pd_addr)
3259 {
3260 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
3261 
3262 	/* compute doesn't have PFP */
3263 	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
3264 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
3265 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3266 		amdgpu_ring_write(ring, 0x0);
3267 	}
3268 }
3269 
3270 static void gfx_v12_1_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
3271 					  u64 seq, unsigned int flags)
3272 {
3273 	struct amdgpu_device *adev = ring->adev;
3274 
3275 	/* we only allocate 32 bits for each fence seq writeback address */
3276 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
3277 
3278 	/* write fence seq to the "addr" */
3279 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3280 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3281 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
3282 	amdgpu_ring_write(ring, lower_32_bits(addr));
3283 	amdgpu_ring_write(ring, upper_32_bits(addr));
3284 	amdgpu_ring_write(ring, lower_32_bits(seq));
3285 
3286 	if (flags & AMDGPU_FENCE_FLAG_INT) {
3287 		/* set register to trigger INT */
3288 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3289 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3290 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
3291 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS));
3292 		amdgpu_ring_write(ring, 0);
3293 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
3294 	}
3295 }
3296 
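/*
 * Read a register through the CP: COPY_DATA moves the register value into
 * the writeback buffer slot at reg_val_offs, with write confirm so the
 * caller can poll for the result.
 */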
3297 static void gfx_v12_1_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
3298 				     uint32_t reg_val_offs)
3299 {
3300 	struct amdgpu_device *adev = ring->adev;
3301 
3302 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
3303 	amdgpu_ring_write(ring, 0 |	/* src: register */
3304 				(5 << 8) |	/* dst: memory */
3305 				(1 << 20));	/* write confirm */
3306 	amdgpu_ring_write(ring, reg);
3307 	amdgpu_ring_write(ring, 0);
3308 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
3309 				reg_val_offs * 4));
3310 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
3311 				reg_val_offs * 4));
3312 }
3313 
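/*
 * Write a register through the CP with WRITE_DATA. KIQ rings set the
 * "no increment address" bit instead of requesting write confirmation.
 */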
3314 static void gfx_v12_1_ring_emit_wreg(struct amdgpu_ring *ring,
3315 				     uint32_t reg,
3316 				     uint32_t val)
3317 {
3318 	uint32_t cmd = 0;
3319 
3320 	switch (ring->funcs->type) {
3321 	case AMDGPU_RING_TYPE_KIQ:
3322 		cmd = (1 << 16); /* no inc addr */
3323 		break;
3324 	default:
3325 		cmd = WR_CONFIRM;
3326 		break;
3327 	}
3328 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3329 	amdgpu_ring_write(ring, cmd);
3330 	amdgpu_ring_write(ring, reg);
3331 	amdgpu_ring_write(ring, 0);
3332 	amdgpu_ring_write(ring, val);
3333 }
3334 
3335 static void gfx_v12_1_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
3336 					uint32_t val, uint32_t mask)
3337 {
3338 	gfx_v12_1_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
3339 }
3340 
3341 static void gfx_v12_1_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
3342 						   uint32_t reg0, uint32_t reg1,
3343 						   uint32_t ref, uint32_t mask)
3344 {
3345 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3346 
3347 	gfx_v12_1_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
3348 			       ref, mask, 0x20);
3349 }
3350 
3351 static void gfx_v12_1_xcc_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
3352 							int me, int pipe,
3353 							enum amdgpu_interrupt_state state,
3354 							int xcc_id)
3355 {
3356 	u32 mec_int_cntl, mec_int_cntl_reg;
3357 
3358 	/*
3359 	 * amdgpu controls only the first MEC. That's why this function only
3360 	 * handles the setting of interrupts for this specific MEC. All other
3361 	 * pipes' interrupts are set by amdkfd.
3362 	 */
3363 
3364 	if (me == 1) {
3365 		switch (pipe) {
3366 		case 0:
3367 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3368 					GC, GET_INST(GC, xcc_id),
3369 					regCP_ME1_PIPE0_INT_CNTL);
3370 			break;
3371 		case 1:
3372 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3373 					GC, GET_INST(GC, xcc_id),
3374 					regCP_ME1_PIPE1_INT_CNTL);
3375 			break;
3376 		case 2:
3377 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3378 					GC, GET_INST(GC, xcc_id),
3379 					regCP_ME1_PIPE2_INT_CNTL);
3380 			break;
3381 		case 3:
3382 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3383 					GC, GET_INST(GC, xcc_id),
3384 					regCP_ME1_PIPE3_INT_CNTL);
3385 			break;
3386 		default:
3387 			DRM_DEBUG("invalid pipe %d\n", pipe);
3388 			return;
3389 		}
3390 	} else {
3391 		DRM_DEBUG("invalid me %d\n", me);
3392 		return;
3393 	}
3394 
3395 	switch (state) {
3396 	case AMDGPU_IRQ_STATE_DISABLE:
3397 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3398 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3399 					     TIME_STAMP_INT_ENABLE, 0);
3400 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3401 					     GENERIC0_INT_ENABLE, 0);
3402 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3403 		break;
3404 	case AMDGPU_IRQ_STATE_ENABLE:
3405 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3406 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3407 					     TIME_STAMP_INT_ENABLE, 1);
3408 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3409 					     GENERIC0_INT_ENABLE, 1);
3410 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3411 		break;
3412 	default:
3413 		break;
3414 	}
3415 }
3416 
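/* Fan EOP interrupt enable/disable out to MEC1 pipes 0-3 on every XCC instance. */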
3417 static int gfx_v12_1_set_eop_interrupt_state(struct amdgpu_device *adev,
3418 					    struct amdgpu_irq_src *src,
3419 					    unsigned type,
3420 					    enum amdgpu_interrupt_state state)
3421 {
3422 	int i, num_xcc;
3423 
3424 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3425 	for (i = 0; i < num_xcc; i++) {
3426 		switch (type) {
3427 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
3428 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3429 					adev, 1, 0, state, i);
3430 			break;
3431 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
3432 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3433 					adev, 1, 1, state, i);
3434 			break;
3435 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
3436 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3437 					adev, 1, 2, state, i);
3438 			break;
3439 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
3440 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3441 					adev, 1, 3, state, i);
3442 			break;
3443 		default:
3444 			break;
3445 		}
3446 	}
3447 
3448 	return 0;
3449 }
3450 
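/*
 * EOP interrupt handler: fences of MES-managed queues are looked up via
 * the MES queue IDR; otherwise me/pipe/queue are decoded from ring_id and
 * the matching gfx or compute ring is processed.
 */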
3451 static int gfx_v12_1_eop_irq(struct amdgpu_device *adev,
3452 			     struct amdgpu_irq_src *source,
3453 			     struct amdgpu_iv_entry *entry)
3454 {
3455 	int i;
3456 	u8 me_id, pipe_id, queue_id;
3457 	struct amdgpu_ring *ring;
3458 	uint32_t mes_queue_id = entry->src_data[0];
3459 
3460 	DRM_DEBUG("IH: CP EOP\n");
3461 
3462 	if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
3463 		struct amdgpu_mes_queue *queue;
3464 
3465 		mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
3466 
3467 		spin_lock(&adev->mes.queue_id_lock);
3468 		queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
3469 		if (queue) {
3470 			DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
3471 			amdgpu_fence_process(queue->ring);
3472 		}
3473 		spin_unlock(&adev->mes.queue_id_lock);
3474 	} else {
3475 		me_id = (entry->ring_id & 0x0c) >> 2;
3476 		pipe_id = (entry->ring_id & 0x03) >> 0;
3477 		queue_id = (entry->ring_id & 0x70) >> 4;
3478 
3479 		switch (me_id) {
3480 		case 0:
3481 			if (pipe_id == 0)
3482 				amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
3483 			else
3484 				amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
3485 			break;
3486 		case 1:
3487 		case 2:
3488 			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3489 				ring = &adev->gfx.compute_ring[i];
3490 				/* Per-queue interrupt is supported for MEC starting from VI.
3491 				 * The interrupt can only be enabled/disabled per pipe instead
3492 				 * of per queue.
3493 				 */
3494 				if ((ring->me == me_id) &&
3495 				    (ring->pipe == pipe_id) &&
3496 				    (ring->queue == queue_id))
3497 					amdgpu_fence_process(ring);
3498 			}
3499 			break;
3500 		}
3501 	}
3502 
3503 	return 0;
3504 }
3505 
3506 static int gfx_v12_1_set_priv_reg_fault_state(struct amdgpu_device *adev,
3507 					      struct amdgpu_irq_src *source,
3508 					      unsigned type,
3509 					      enum amdgpu_interrupt_state state)
3510 {
3511 	int i, num_xcc;
3512 
3513 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3514 	switch (state) {
3515 	case AMDGPU_IRQ_STATE_DISABLE:
3516 	case AMDGPU_IRQ_STATE_ENABLE:
3517 		for (i = 0; i < num_xcc; i++)
3518 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3519 					      PRIV_REG_INT_ENABLE,
3520 					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3521 		break;
3522 	default:
3523 		break;
3524 	}
3525 
3526 	return 0;
3527 }
3528 
3529 static int gfx_v12_1_set_priv_inst_fault_state(struct amdgpu_device *adev,
3530 					       struct amdgpu_irq_src *source,
3531 					       unsigned type,
3532 					       enum amdgpu_interrupt_state state)
3533 {
3534 	int i, num_xcc;
3535 
3536 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3537 	switch (state) {
3538 	case AMDGPU_IRQ_STATE_DISABLE:
3539 	case AMDGPU_IRQ_STATE_ENABLE:
3540 		for (i = 0; i < num_xcc; i++)
3541 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3542 				       PRIV_INSTR_INT_ENABLE,
3543 				       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3544 		break;
3545 	default:
3546 		break;
3547 	}
3548 
3549 	return 0;
3550 }
3551 
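/*
 * Common handler for privileged register/instruction faults: decode
 * me/pipe/queue from ring_id and report a scheduler fault on the
 * offending ring.
 */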
3552 static void gfx_v12_1_handle_priv_fault(struct amdgpu_device *adev,
3553 					struct amdgpu_iv_entry *entry)
3554 {
3555 	u8 me_id, pipe_id, queue_id;
3556 	struct amdgpu_ring *ring;
3557 	int i;
3558 
3559 	me_id = (entry->ring_id & 0x0c) >> 2;
3560 	pipe_id = (entry->ring_id & 0x03) >> 0;
3561 	queue_id = (entry->ring_id & 0x70) >> 4;
3562 
3563 	switch (me_id) {
3564 	case 0:
3565 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3566 			ring = &adev->gfx.gfx_ring[i];
3567 			/* we only enable 1 gfx queue per pipe for now */
3568 			if (ring->me == me_id && ring->pipe == pipe_id)
3569 				drm_sched_fault(&ring->sched);
3570 		}
3571 		break;
3572 	case 1:
3573 	case 2:
3574 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3575 			ring = &adev->gfx.compute_ring[i];
3576 			if (ring->me == me_id && ring->pipe == pipe_id &&
3577 			    ring->queue == queue_id)
3578 				drm_sched_fault(&ring->sched);
3579 		}
3580 		break;
3581 	default:
3582 		BUG();
3583 		break;
3584 	}
3585 }
3586 
3587 static int gfx_v12_1_priv_reg_irq(struct amdgpu_device *adev,
3588 				  struct amdgpu_irq_src *source,
3589 				  struct amdgpu_iv_entry *entry)
3590 {
3591 	DRM_ERROR("Illegal register access in command stream\n");
3592 	gfx_v12_1_handle_priv_fault(adev, entry);
3593 	return 0;
3594 }
3595 
3596 static int gfx_v12_1_priv_inst_irq(struct amdgpu_device *adev,
3597 				   struct amdgpu_irq_src *source,
3598 				   struct amdgpu_iv_entry *entry)
3599 {
3600 	DRM_ERROR("Illegal instruction in command stream\n");
3601 	gfx_v12_1_handle_priv_fault(adev, entry);
3602 	return 0;
3603 }
3604 
3605 static void gfx_v12_1_emit_mem_sync(struct amdgpu_ring *ring)
3606 {
3607 	const unsigned int gcr_cntl =
3608 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
3609 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
3610 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
3611 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
3612 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1) |
3613 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_SCOPE(2);
3614 
3615 	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
3616 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
3617 	amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
3618 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
3619 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
3620 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
3621 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
3622 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
3623 	amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
3624 }
3625 
3626 static const struct amd_ip_funcs gfx_v12_1_ip_funcs = {
3627 	.name = "gfx_v12_1",
3628 	.early_init = gfx_v12_1_early_init,
3629 	.late_init = gfx_v12_1_late_init,
3630 	.sw_init = gfx_v12_1_sw_init,
3631 	.sw_fini = gfx_v12_1_sw_fini,
3632 	.hw_init = gfx_v12_1_hw_init,
3633 	.hw_fini = gfx_v12_1_hw_fini,
3634 	.suspend = gfx_v12_1_suspend,
3635 	.resume = gfx_v12_1_resume,
3636 	.is_idle = gfx_v12_1_is_idle,
3637 	.wait_for_idle = gfx_v12_1_wait_for_idle,
3638 	.set_clockgating_state = gfx_v12_1_set_clockgating_state,
3639 	.set_powergating_state = gfx_v12_1_set_powergating_state,
3640 	.get_clockgating_state = gfx_v12_1_get_clockgating_state,
3641 };
3642 
3643 static const struct amdgpu_ring_funcs gfx_v12_1_ring_funcs_compute = {
3644 	.type = AMDGPU_RING_TYPE_COMPUTE,
3645 	.align_mask = 0xff,
3646 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
3647 	.support_64bit_ptrs = true,
3648 	.get_rptr = gfx_v12_1_ring_get_rptr_compute,
3649 	.get_wptr = gfx_v12_1_ring_get_wptr_compute,
3650 	.set_wptr = gfx_v12_1_ring_set_wptr_compute,
3651 	.emit_frame_size =
3652 		7 + /* gfx_v12_1_ring_emit_pipeline_sync */
3653 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
3654 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
3655 		2 + /* gfx_v12_1_ring_emit_vm_flush */
3656 		8 + 8 + 8 + /* gfx_v12_1_ring_emit_fence x3 for user fence, vm fence */
3657 		8, /* gfx_v12_1_emit_mem_sync */
3658 	.emit_ib_size =	7, /* gfx_v12_1_ring_emit_ib_compute */
3659 	.emit_ib = gfx_v12_1_ring_emit_ib_compute,
3660 	.emit_fence = gfx_v12_1_ring_emit_fence,
3661 	.emit_pipeline_sync = gfx_v12_1_ring_emit_pipeline_sync,
3662 	.emit_vm_flush = gfx_v12_1_ring_emit_vm_flush,
3663 	.test_ring = gfx_v12_1_ring_test_ring,
3664 	.test_ib = gfx_v12_1_ring_test_ib,
3665 	.insert_nop = amdgpu_ring_insert_nop,
3666 	.pad_ib = amdgpu_ring_generic_pad_ib,
3667 	.emit_wreg = gfx_v12_1_ring_emit_wreg,
3668 	.emit_reg_wait = gfx_v12_1_ring_emit_reg_wait,
3669 	.emit_reg_write_reg_wait = gfx_v12_1_ring_emit_reg_write_reg_wait,
3670 	.emit_mem_sync = gfx_v12_1_emit_mem_sync,
3671 };
3672 
3673 static const struct amdgpu_ring_funcs gfx_v12_1_ring_funcs_kiq = {
3674 	.type = AMDGPU_RING_TYPE_KIQ,
3675 	.align_mask = 0xff,
3676 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
3677 	.support_64bit_ptrs = true,
3678 	.get_rptr = gfx_v12_1_ring_get_rptr_compute,
3679 	.get_wptr = gfx_v12_1_ring_get_wptr_compute,
3680 	.set_wptr = gfx_v12_1_ring_set_wptr_compute,
3681 	.emit_frame_size =
3682 		7 + /* gfx_v12_1_ring_emit_pipeline_sync */
3683 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
3684 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
3685 		2 + /* gfx_v12_1_ring_emit_vm_flush */
3686 		8 + 8 + 8, /* gfx_v12_1_ring_emit_fence_kiq x3 for user fence, vm fence */
3687 	.emit_ib_size =	7, /* gfx_v12_1_ring_emit_ib_compute */
3688 	.emit_ib = gfx_v12_1_ring_emit_ib_compute,
3689 	.emit_fence = gfx_v12_1_ring_emit_fence_kiq,
3690 	.test_ring = gfx_v12_1_ring_test_ring,
3691 	.test_ib = gfx_v12_1_ring_test_ib,
3692 	.insert_nop = amdgpu_ring_insert_nop,
3693 	.pad_ib = amdgpu_ring_generic_pad_ib,
3694 	.emit_rreg = gfx_v12_1_ring_emit_rreg,
3695 	.emit_wreg = gfx_v12_1_ring_emit_wreg,
3696 	.emit_reg_wait = gfx_v12_1_ring_emit_reg_wait,
3697 	.emit_reg_write_reg_wait = gfx_v12_1_ring_emit_reg_write_reg_wait,
3698 };
3699 
3700 static void gfx_v12_1_set_ring_funcs(struct amdgpu_device *adev)
3701 {
3702 	int i, j, num_xcc;
3703 
3704 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3705 	for (i = 0; i < num_xcc; i++) {
3706 		adev->gfx.kiq[i].ring.funcs = &gfx_v12_1_ring_funcs_kiq;
3707 
3708 		for (j = 0; j < adev->gfx.num_compute_rings; j++)
3709 			adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs =
3710 						&gfx_v12_1_ring_funcs_compute;
3711 	}
3712 }
3713 
3714 static const struct amdgpu_irq_src_funcs gfx_v12_1_eop_irq_funcs = {
3715 	.set = gfx_v12_1_set_eop_interrupt_state,
3716 	.process = gfx_v12_1_eop_irq,
3717 };
3718 
3719 static const struct amdgpu_irq_src_funcs gfx_v12_1_priv_reg_irq_funcs = {
3720 	.set = gfx_v12_1_set_priv_reg_fault_state,
3721 	.process = gfx_v12_1_priv_reg_irq,
3722 };
3723 
3724 static const struct amdgpu_irq_src_funcs gfx_v12_1_priv_inst_irq_funcs = {
3725 	.set = gfx_v12_1_set_priv_inst_fault_state,
3726 	.process = gfx_v12_1_priv_inst_irq,
3727 };
3728 
3729 static void gfx_v12_1_set_irq_funcs(struct amdgpu_device *adev)
3730 {
3731 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
3732 	adev->gfx.eop_irq.funcs = &gfx_v12_1_eop_irq_funcs;
3733 
3734 	adev->gfx.priv_reg_irq.num_types = 1;
3735 	adev->gfx.priv_reg_irq.funcs = &gfx_v12_1_priv_reg_irq_funcs;
3736 
3737 	adev->gfx.priv_inst_irq.num_types = 1;
3738 	adev->gfx.priv_inst_irq.funcs = &gfx_v12_1_priv_inst_irq_funcs;
3739 }
3740 
3741 static void gfx_v12_1_set_imu_funcs(struct amdgpu_device *adev)
3742 {
3743 #if 0
3744 	if (adev->flags & AMD_IS_APU)
3745 		adev->gfx.imu.mode = MISSION_MODE;
3746 	else
3747 		adev->gfx.imu.mode = DEBUG_MODE;
3748 
3749 	adev->gfx.imu.funcs = &gfx_v12_0_imu_funcs;
3750 #endif
3751 }
3752 
3753 static void gfx_v12_1_set_rlc_funcs(struct amdgpu_device *adev)
3754 {
3755 	adev->gfx.rlc.funcs = &gfx_v12_1_rlc_funcs;
3756 }
3757 
3758 static void gfx_v12_1_set_mqd_funcs(struct amdgpu_device *adev)
3759 {
3760 	/* set compute eng mqd */
3761 	adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
3762 		sizeof(struct v12_1_compute_mqd);
3763 	adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
3764 		gfx_v12_1_compute_mqd_init;
3765 }
3766 
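/* Program the user-requested inactive-WGP mask for the currently selected SE/SH. */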
3767 static void gfx_v12_1_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
3768 							  u32 bitmap, int xcc_id)
3769 {
3770 	u32 data;
3771 
3772 	if (!bitmap)
3773 		return;
3774 
3775 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
3776 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
3777 
3778 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
3779 }
3780 
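/*
 * Combine the fuse (CC_*) and user (GC_USER_*) inactive-WGP masks for the
 * currently selected SE/SH and return the complement as the active-WGP
 * bitmap; each WGP holds two CUs, hence max_cu_per_sh >> 1.
 */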
3781 static u32 gfx_v12_1_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev,
3782 						  int xcc_id)
3783 {
3784 	u32 data, wgp_bitmask;

3785 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
3786 	data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);
3787 
3788 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
3789 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
3790 
3791 	wgp_bitmask =
3792 		amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);
3793 
3794 	return (~data) & wgp_bitmask;
3795 }
3796 
3797 static u32 gfx_v12_1_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev,
3798 						 int xcc_id)
3799 {
3800 	u32 wgp_idx, wgp_active_bitmap;
3801 	u32 cu_bitmap_per_wgp, cu_active_bitmap;
3802 
3803 	wgp_active_bitmap = gfx_v12_1_get_wgp_active_bitmap_per_sh(adev, xcc_id);
3804 	cu_active_bitmap = 0;
3805 
3806 	for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
3807 		/* each enabled WGP provides 2 enabled CUs */
3808 		cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
3809 		if (wgp_active_bitmap & (1 << wgp_idx))
3810 			cu_active_bitmap |= cu_bitmap_per_wgp;
3811 	}
3812 
3813 	return cu_active_bitmap;
3814 }
3815 
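/*
 * Walk every SE/SH on every XCC, apply the user CU-disable masks and
 * collect the per-SH active CU bitmaps and total active CU count into
 * cu_info.
 */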
3816 static int gfx_v12_1_get_cu_info(struct amdgpu_device *adev,
3817 				 struct amdgpu_cu_info *cu_info)
3818 {
3819 	int i, j, k, counter, xcc_id, active_cu_number = 0;
3820 	u32 mask, bitmap;
3821 	unsigned disable_masks[8 * 2];
3822 
3823 	if (!adev || !cu_info)
3824 		return -EINVAL;
3825 
3826 	amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2);
3827 
3828 	mutex_lock(&adev->grbm_idx_mutex);
3829 	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
3830 		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3831 			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3832 				bitmap = i * adev->gfx.config.max_sh_per_se + j;
3833 				if (!((gfx_v12_1_get_sa_active_bitmap(adev, xcc_id) >> bitmap) & 1))
3834 					continue;
3835 				mask = 1;
3836 				counter = 0;
3837 				gfx_v12_1_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
3838 				if (i < 8 && j < 2)
3839 					gfx_v12_1_set_user_wgp_inactive_bitmap_per_sh(
3840 						adev, disable_masks[i * 2 + j], xcc_id);
3841 				bitmap = gfx_v12_1_get_cu_active_bitmap_per_sh(adev, xcc_id);
3842 
3843 				/*
3844 				 * GFX12 can support more than 4 SEs, but the bitmap
3845 				 * in the cu_info struct is 4x4 and the ioctl interface
3846 				 * struct drm_amdgpu_info_device must stay stable.
3847 				 * So the last two columns of the bitmap store the CU mask
3848 				 * for SEs 4 to 7; the layout of the bitmap is as below:
3849 				 *    SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]}
3850 				 *    SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]}
3851 				 *    SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]}
3852 				 *    SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]}
3853 				 *    SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]}
3854 				 *    SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]}
3855 				 *    SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
3856 				 *    SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
3857 				 */
3858 				cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap;
3859 
3860 				for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
3861 					if (bitmap & mask)
3862 						counter++;
3863 
3864 					mask <<= 1;
3865 				}
3866 				active_cu_number += counter;
3867 			}
3868 		}
3869 		gfx_v12_1_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, xcc_id);
3870 	}
3871 	mutex_unlock(&adev->grbm_idx_mutex);
3872 
3873 	cu_info->number = active_cu_number;
3874 	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
3875 	cu_info->lds_size = 320;
3876 
3877 	return 0;
3878 }
3879 
3880 const struct amdgpu_ip_block_version gfx_v12_1_ip_block = {
3881 	.type = AMD_IP_BLOCK_TYPE_GFX,
3882 	.major = 12,
3883 	.minor = 1,
3884 	.rev = 0,
3885 	.funcs = &gfx_v12_1_ip_funcs,
3886 };
3887 
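/*
 * Partition (XCP) resume: re-initialize constants, then resume the RLC
 * (bare metal only) and the CP on each XCC instance selected by inst_mask.
 */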
3888 static int gfx_v12_1_xcp_resume(void *handle, uint32_t inst_mask)
3889 {
3890 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3891 	uint32_t tmp_mask;
3892 	int i, r;
3893 
3894 	/* TODO: initialize golden registers */
3895 	/* gfx_v12_1_init_golden_registers(adev); */
3896 
3897 	tmp_mask = inst_mask;
3898 	for_each_inst(i, tmp_mask)
3899 		gfx_v12_1_xcc_constants_init(adev, i);
3900 
3901 	if (!amdgpu_sriov_vf(adev)) {
3902 		tmp_mask = inst_mask;
3903 		for_each_inst(i, tmp_mask) {
3904 			r = gfx_v12_1_xcc_rlc_resume(adev, i);
3905 			if (r)
3906 				return r;
3907 		}
3908 	}
3909 
3910 	tmp_mask = inst_mask;
3911 	for_each_inst(i, tmp_mask) {
3912 		r = gfx_v12_1_xcc_cp_resume(adev, i);
3913 		if (r)
3914 			return r;
3915 	}
3916 
3917 	return 0;
3918 }
3919 
3920 static int gfx_v12_1_xcp_suspend(void *handle, uint32_t inst_mask)
3921 {
3922 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3923 	int i;
3924 
3925 	for_each_inst(i, inst_mask)
3926 		gfx_v12_1_xcc_fini(adev, i);
3927 
3928 	return 0;
3929 }
3930 
3931 struct amdgpu_xcp_ip_funcs gfx_v12_1_xcp_funcs = {
3932 	.suspend = &gfx_v12_1_xcp_suspend,
3933 	.resume = &gfx_v12_1_xcp_resume
3934 };
3935