/*
 * Copyright 2025 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "amdgpu_atomfirmware.h"
#include "imu_v12_1.h"
#include "soc_v1_0.h"
#include "gfx_v12_1_pkt.h"

#include "gc/gc_12_1_0_offset.h"
#include "gc/gc_12_1_0_sh_mask.h"
#include "soc24_enum.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"

#include "soc15.h"
#include "clearstate_gfx12.h"
#include "v12_structs.h"
#include "gfx_v12_1.h"
#include "mes_v12_1.h"

#define GFX12_MEC_HPD_SIZE	2048

#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L

MODULE_FIRMWARE("amdgpu/gc_12_1_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_12_1_0_rlc.bin");

#define SH_MEM_ALIGNMENT_MODE_UNALIGNED_GFX12_1_0	0x00000001
#define DEFAULT_SH_MEM_CONFIG \
	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED_GFX12_1_0 << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))

static void gfx_v12_1_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v12_1_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v12_1_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v12_1_set_rlc_funcs(struct amdgpu_device *adev);
static void gfx_v12_1_set_mqd_funcs(struct amdgpu_device *adev);
static void gfx_v12_1_set_imu_funcs(struct amdgpu_device *adev);
static int gfx_v12_1_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v12_1_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v12_1_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				       u32 sh_num, u32 instance, int xcc_id);
static u32 gfx_v12_1_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev,
						  int xcc_id);

static void gfx_v12_1_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val);
static int gfx_v12_1_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v12_1_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel);
static void gfx_v12_1_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v12_1_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v12_1_update_perf_clk(struct amdgpu_device *adev,
				      bool enable);
static void gfx_v12_1_xcc_update_perf_clk(struct amdgpu_device *adev,
					 bool enable, int xcc_id);

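/*
 * KIQ PM4 helpers: build the packets the kernel interface queue uses to
 * manage other queues. SET_RESOURCES hands the KIQ the set of queues it
 * may schedule; only the queue mask is non-zero here, the GWS and OAC
 * masks are left clear.
 */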
static void gfx_v12_1_kiq_set_resources(struct amdgpu_ring *kiq_ring,
					uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);
}

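/*
 * Map a compute or MES ring onto a hardware queue slot via the KIQ.
 * The MQD address tells the CP where the queue state lives; the wptr
 * address is where it reads the ring's write pointer.
 */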
static void gfx_v12_1_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				     struct amdgpu_ring *ring)
{
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = ring->wptr_gpu_addr;
	uint32_t me = 0, eng_sel = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_COMPUTE:
		me = 1;
		eng_sel = 0;
		break;
	case AMDGPU_RING_TYPE_MES:
		me = 2;
		eng_sel = 5;
		break;
	default:
		WARN_ON(1);
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((me)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx_v12_1_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				       struct amdgpu_ring *ring,
				       enum amdgpu_unmap_queues_action action,
				       u64 gpu_addr, u64 seq)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr,
					      seq, kiq_ring->xcc_id);
		return;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
		  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx_v12_1_kiq_query_status(struct amdgpu_ring *kiq_ring,
				       struct amdgpu_ring *ring,
				       u64 addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v12_1_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
					  uint16_t pasid,
					  uint32_t flush_type,
					  bool all_hub)
{
	gfx_v12_1_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
}

static const struct kiq_pm4_funcs gfx_v12_1_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v12_1_kiq_set_resources,
	.kiq_map_queues = gfx_v12_1_kiq_map_queues,
	.kiq_unmap_queues = gfx_v12_1_kiq_unmap_queues,
	.kiq_query_status = gfx_v12_1_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v12_1_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v12_1_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		adev->gfx.kiq[i].pmf = &gfx_v12_1_kiq_pm4_funcs;
}

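/*
 * Emit a WAIT_REG_MEM packet: stall the selected engine until the value
 * at addr0/addr1 (memory if mem_space is set, register otherwise) masked
 * with @mask equals @ref, polling every @inv intervals.
 */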
static void gfx_v12_1_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				   int mem_space, int opt, uint32_t addr0,
				   uint32_t addr1, uint32_t ref,
				   uint32_t mask, uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

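/*
 * Basic ring test: seed SCRATCH_REG0 with 0xCAFEDEAD, ask the ring to
 * rewrite it with 0xDEADBEEF, then poll until the new value shows up or
 * the usec timeout expires.
 */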
static int gfx_v12_1_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch_reg0_offset, xcc_offset;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* Use register offset which is local to XCC in the packet */
	xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
	WREG32(scratch_reg0_offset, 0xCAFEDEAD);
	tmp = RREG32(scratch_reg0_offset);

	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		dev_err(adev->dev,
			"amdgpu: cp failed to lock ring %d (%d).\n",
			ring->idx, r);
		return r;
	}

	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
		gfx_v12_1_ring_emit_wreg(ring, xcc_offset, 0xDEADBEEF);
	} else {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
		amdgpu_ring_write(ring, xcc_offset -
				  PACKET3_SET_UCONFIG_REG_START);
		amdgpu_ring_write(ring, 0xDEADBEEF);
	}
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch_reg0_offset);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}

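/*
 * IB test: submit a small indirect buffer whose WRITE_DATA packet stores
 * 0xDEADBEEF to a writeback slot, then wait on the fence and verify the
 * value landed.
 */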
static int gfx_v12_1_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	uint64_t gpu_addr;
	volatile uint32_t *cpu_ptr;
	long r;

	/* MES KIQ fw does not support indirect buffers yet */
	if (adev->enable_mes_kiq &&
	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		return 0;

	memset(&ib, 0, sizeof(ib));

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	cpu_ptr = &adev->wb.wb[index];

	r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
	if (r) {
		dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;
err2:
	amdgpu_ib_free(&ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}

static void gfx_v12_1_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v12_1_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
{
	const struct psp_firmware_header_v1_0 *toc_hdr;
	int err = 0;

	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_toc.bin", ucode_prefix);
	if (err)
		goto out;

	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
			le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
	return 0;
out:
	amdgpu_ucode_release(&adev->psp.toc_fw);
	return err;
}

static int gfx_v12_1_init_microcode(struct amdgpu_device *adev)
{
	char ucode_prefix[15];
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

	if (!amdgpu_sriov_vf(adev)) {
		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
					   AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s_rlc.bin", ucode_prefix);
		if (err)
			goto out;
		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
		err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
		if (err)
			goto out;
	}

	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_mec.bin", ucode_prefix);
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		err = gfx_v12_1_init_toc_microcode(adev, ucode_prefix);

	/* only one MEC for gfx 12 */
	adev->gfx.mec2_fw = NULL;

	if (adev->gfx.imu.funcs) {
		if (adev->gfx.imu.funcs->init_microcode) {
			err = adev->gfx.imu.funcs->init_microcode(adev);
			if (err)
				dev_err(adev->dev, "Failed to load imu firmware!\n");
		}
	}

out:
	if (err) {
		amdgpu_ucode_release(&adev->gfx.rlc_fw);
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	}

	return err;
}

static u32 gfx_v12_1_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	count += 1;

	for (sect = gfx12_cs_data; sect->section != NULL; ++sect) {
		if (sect->id == SECT_CONTEXT) {
			for (ext = sect->section; ext->extent != NULL; ++ext)
				count += 2 + ext->reg_count;
		} else
			return 0;
	}

	return count;
}

static void gfx_v12_1_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
	u32 count = 0, clustercount = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	count += 1;

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		if (sect->id == SECT_CONTEXT) {
			for (ext = sect->section; ext->extent != NULL; ++ext) {
				clustercount++;
				buffer[count++] = ext->reg_count;
				buffer[count++] = ext->reg_index;

				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			}
		} else
			return;
	}

	buffer[0] = clustercount;
}

static void gfx_v12_1_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			&adev->gfx.rlc.clear_state_gpu_addr,
			(void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			&adev->gfx.rlc.cp_table_gpu_addr,
			(void **)&adev->gfx.rlc.cp_table_ptr);
}

static void gfx_v12_1_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	int xcc_id, num_xcc;
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
		reg_access_ctrl->scratch_reg0 =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0);
		reg_access_ctrl->scratch_reg1 =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1);
		reg_access_ctrl->scratch_reg2 =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2);
		reg_access_ctrl->scratch_reg3 =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3);
		reg_access_ctrl->grbm_cntl =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
		reg_access_ctrl->grbm_idx =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
		reg_access_ctrl->spare_int =
			SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT_0);
	}
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}

static int gfx_v12_1_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r, i, num_xcc;

	adev->gfx.rlc.cs_data = gfx12_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	/* init spm vmid with 0xf */
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		if (adev->gfx.rlc.funcs->update_spm_vmid)
			adev->gfx.rlc.funcs->update_spm_vmid(adev, i, NULL, 0xf);
	}

	return 0;
}

static void gfx_v12_1_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
}

static int gfx_v12_1_mec_init(struct amdgpu_device *adev)
{
	int r, i, num_xcc;
	u32 *hpd;
	size_t mec_hpd_size;

	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
			    AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings *
		       GFX12_MEC_HPD_SIZE * num_xcc;

	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v12_1_mec_fini(adev);
			return r;
		}

		memset(hpd, 0, mec_hpd_size);

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	return 0;
}

static uint32_t wave_read_ind(struct amdgpu_device *adev,
			      uint32_t xcc_id, uint32_t wave,
			      uint32_t address)
{
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev,
			   uint32_t xcc_id, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void gfx_v12_1_read_wave_data(struct amdgpu_device *adev,
				     uint32_t xcc_id,
				     uint32_t simd, uint32_t wave,
				     uint32_t *dst, int *no_fields)
{
	/* in gfx12 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh so it should be
	 * zero here */
	WARN_ON(simd != 0);

	/* type 4 wave data */
	dst[(*no_fields)++] = 4;
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_MODE);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_STATE_PRIV);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXCP_FLAG_PRIV);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXCP_FLAG_USER);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_TRAP_CTRL);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_ACTIVE);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_VALID_AND_IDLE);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_DVGPR_ALLOC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_DVGPR_ALLOC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_SCHED_MODE);
}

static void gfx_v12_1_read_wave_sgprs(struct amdgpu_device *adev,
				      uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t start,
				      uint32_t size, uint32_t *dst)
{
	WARN_ON(simd != 0);

	wave_read_regs(adev, xcc_id, wave, 0,
		       start + SQIND_WAVE_SGPRS_OFFSET,
		       size, dst);
}

static void gfx_v12_1_read_wave_vgprs(struct amdgpu_device *adev,
				      uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, wave, thread,
		       start + SQIND_WAVE_VGPRS_OFFSET,
		       size, dst);
}

static void gfx_v12_1_select_me_pipe_q(struct amdgpu_device *adev,
				       u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	soc_v1_0_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
}

static int gfx_v12_1_get_xccs_per_xcp(struct amdgpu_device *adev)
{
	/* Fill this in when the interface is ready */
	return 1;
}

static const struct amdgpu_gfx_funcs gfx_v12_1_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v12_1_get_gpu_clock_counter,
	.select_se_sh = &gfx_v12_1_xcc_select_se_sh,
	.read_wave_data = &gfx_v12_1_read_wave_data,
	.read_wave_sgprs = &gfx_v12_1_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v12_1_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v12_1_select_me_pipe_q,
	.update_perfmon_mgcg = &gfx_v12_1_update_perf_clk,
	.get_xccs_per_xcp = &gfx_v12_1_get_xccs_per_xcp,
};

static int gfx_v12_1_gpu_early_init(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 1, 0):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	default:
		BUG();
		break;
	}

	return 0;
}

static int gfx_v12_1_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				       int xcc_id, int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;
	uint32_t xcc_doorbell_start;

	ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
				       ring_id];

	/* mec0 is me1 */
	ring->xcc_id = xcc_id;
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
			     xcc_id * adev->doorbell_index.xcc_doorbell_range;
	ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
			     (ring_id + xcc_id * adev->gfx.num_compute_rings) *
			     GFX12_MEC_HPD_SIZE;
	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
	sprintf(ring->name, "comp_%d.%d.%d.%d",
			ring->xcc_id, ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     hw_prio, NULL);
	if (r)
		return r;

	return 0;
}

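/*
 * Per-firmware-ID layout of the RLC autoload buffer, filled from the TOC
 * image: byte offset and size of each image, plus the number of instances
 * implied by the TOC's vfflr_image_code field.
 */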
static struct {
	SOC24_FIRMWARE_ID	id;
	unsigned int		offset;
	unsigned int		size;
	unsigned int		size_x16;
	unsigned int		num_inst;
} rlc_autoload_info[SOC24_FIRMWARE_ID_MAX];

#define RLC_TOC_OFFSET_DWUNIT   8
#define RLC_SIZE_MULTIPLE       1024
#define RLC_TOC_UMF_SIZE_inM	23ULL
#define RLC_TOC_FORMAT_API	165ULL

#define RLC_NUM_INS_CODE0   1
#define RLC_NUM_INS_CODE1   8
#define RLC_NUM_INS_CODE2   2
#define RLC_NUM_INS_CODE3   16

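/* Walk the TOC entries until an invalid ID terminates the list. */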
static void gfx_v12_1_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
{
	RLC_TABLE_OF_CONTENT_V2 *ucode = rlc_toc;

	while (ucode && (ucode->id > SOC24_FIRMWARE_ID_INVALID)) {
		rlc_autoload_info[ucode->id].id = ucode->id;
		rlc_autoload_info[ucode->id].offset =
			ucode->offset * RLC_TOC_OFFSET_DWUNIT * 4;
		rlc_autoload_info[ucode->id].size =
			ucode->size_x16 ? ucode->size * RLC_SIZE_MULTIPLE * 4 :
					  ucode->size * 4;
		switch (ucode->vfflr_image_code) {
		case 0:
			rlc_autoload_info[ucode->id].num_inst =
				RLC_NUM_INS_CODE0;
			break;
		case 1:
			rlc_autoload_info[ucode->id].num_inst =
				RLC_NUM_INS_CODE1;
			break;
		case 2:
			rlc_autoload_info[ucode->id].num_inst =
				RLC_NUM_INS_CODE2;
			break;
		case 3:
			rlc_autoload_info[ucode->id].num_inst =
				RLC_NUM_INS_CODE3;
			break;
		default:
			dev_err(adev->dev,
				"Invalid Instance number detected\n");
			break;
		}
		ucode++;
	}
}

static uint32_t gfx_v12_1_calc_toc_total_size(struct amdgpu_device *adev)
{
	uint32_t total_size = 0;
	SOC24_FIRMWARE_ID id;

	gfx_v12_1_parse_rlc_toc(adev, adev->psp.toc.start_addr);

	for (id = SOC24_FIRMWARE_ID_RLC_G_UCODE; id < SOC24_FIRMWARE_ID_MAX; id++)
		total_size += rlc_autoload_info[id].size;

	/* In case the offset in rlc toc ucode is aligned */
	if (total_size < rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].offset)
		total_size = rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].offset +
			rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].size;
	if (total_size < (RLC_TOC_UMF_SIZE_inM << 20))
		total_size = RLC_TOC_UMF_SIZE_inM << 20;

	return total_size;
}

static int gfx_v12_1_rlc_autoload_buffer_init(struct amdgpu_device *adev)
{
	int r;
	uint32_t total_size;

	total_size = gfx_v12_1_calc_toc_total_size(adev);

	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.rlc.rlc_autoload_bo,
				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);

	if (r) {
		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
		return r;
	}

	return 0;
}

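/*
 * Copy one firmware image into its slot in the autoload buffer; when the
 * TOC expects multiple instances, the image is replicated (and zero-padded
 * up to the per-instance size) once per instance.
 */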
static void gfx_v12_1_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
						       SOC24_FIRMWARE_ID id,
						       const void *fw_data,
						       uint32_t fw_size)
{
	uint32_t toc_offset;
	uint32_t toc_fw_size, toc_fw_inst_size;
	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;
	int i, num_inst;

	if (id <= SOC24_FIRMWARE_ID_INVALID || id >= SOC24_FIRMWARE_ID_MAX)
		return;

	toc_offset = rlc_autoload_info[id].offset;
	toc_fw_size = rlc_autoload_info[id].size;
	num_inst = rlc_autoload_info[id].num_inst;
	toc_fw_inst_size = toc_fw_size / num_inst;

	if (fw_size == 0)
		fw_size = toc_fw_inst_size;

	if (fw_size > toc_fw_inst_size)
		fw_size = toc_fw_inst_size;

	for (i = 0; i < num_inst; i++) {
		memcpy(ptr + toc_offset + i * toc_fw_inst_size, fw_data, fw_size);

		if (fw_size < toc_fw_inst_size)
			memset(ptr + toc_offset + fw_size + i * toc_fw_inst_size,
			       0, toc_fw_inst_size - fw_size);
	}
}

static void
gfx_v12_1_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev)
{
	void *data;
	uint32_t size;
	uint32_t *toc_ptr;

	data = adev->psp.toc.start_addr;
	size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_TOC].size;

	toc_ptr = (uint32_t *)data + size / 4 - 2;
	*toc_ptr = (RLC_TOC_FORMAT_API << 24) | 0x1;

	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_TOC,
						   data, size);
}

static void
gfx_v12_1_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	const struct rlc_firmware_header_v2_1 *rlcv21_hdr;
	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
	uint16_t version_major, version_minor;

	/* mec ucode */
	cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.mec_fw->data;
	/* instruction */
	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
		le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
	fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC,
						   fw_data, fw_size);
	/* data */
	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
		le32_to_cpu(cpv2_hdr->data_offset_bytes));
	fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P0_STACK,
						   fw_data, fw_size);
	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P1_STACK,
						   fw_data, fw_size);
	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P2_STACK,
						   fw_data, fw_size);
	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P3_STACK,
						   fw_data, fw_size);

	/* rlc ucode */
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
		adev->gfx.rlc_fw->data;
	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
	gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_G_UCODE,
						   fw_data, fw_size);

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2) {
		if (version_minor >= 1) {
			rlcv21_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_offset_bytes));
			fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_size_bytes);
			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLCG_SCRATCH,
						   fw_data, fw_size);

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv21_hdr->save_restore_list_srm_offset_bytes));
			fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_srm_size_bytes);
			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_SRM_ARAM,
						   fw_data, fw_size);
		}
		if (version_minor >= 2) {
			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_UCODE,
						   fw_data, fw_size);

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
			gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_DRAM_BOOT,
						   fw_data, fw_size);
		}
	}
}

static void
gfx_v12_1_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct sdma_firmware_header_v3_0 *sdma_hdr;

	if (adev->sdma.instance[0].fw) {
		sdma_hdr = (const struct sdma_firmware_header_v3_0 *)
			adev->sdma.instance[0].fw->data;
		fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
				le32_to_cpu(sdma_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(sdma_hdr->ucode_size_bytes);

		gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_SDMA_UCODE_TH0,
							   fw_data, fw_size);
	}
}

static void
gfx_v12_1_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	unsigned fw_size;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	int pipe, ucode_id, data_id;

	for (pipe = 0; pipe < 2; pipe++) {
		if (pipe == 0) {
			ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P0;
			data_id  = SOC24_FIRMWARE_ID_RS64_MES_P0_STACK;
		} else {
			ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P1;
			data_id  = SOC24_FIRMWARE_ID_RS64_MES_P1_STACK;
		}

		mes_hdr = (const struct mes_firmware_header_v1_0 *)
			adev->mes.fw[pipe]->data;

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);

		gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, ucode_id, fw_data, fw_size);

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);

		gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, data_id, fw_data, fw_size);
	}
}

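/*
 * RLC backdoor autoload: stage all ucode images in the autoload buffer,
 * point the IMU bootloader registers at the RLC_G image, load the IMU
 * firmware, then un-halt the RLC so it can load the rest itself.
 */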
static int gfx_v12_1_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
{
	uint32_t rlc_g_offset, rlc_g_size;
	uint64_t gpu_addr;
	uint32_t data;

	/* RLC autoload sequence 2: copy ucode */
	gfx_v12_1_rlc_backdoor_autoload_copy_sdma_ucode(adev);
	gfx_v12_1_rlc_backdoor_autoload_copy_gfx_ucode(adev);
	gfx_v12_1_rlc_backdoor_autoload_copy_mes_ucode(adev);
	gfx_v12_1_rlc_backdoor_autoload_copy_toc_ucode(adev);

	rlc_g_offset = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].offset;
	rlc_g_size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].size;
	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset - adev->gmc.vram_start;

	WREG32_SOC15(GC, GET_INST(GC, 0),
		     regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr));
	WREG32_SOC15(GC, GET_INST(GC, 0),
		     regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr));

	WREG32_SOC15(GC, GET_INST(GC, 0),
		     regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size);

	if (adev->gfx.imu.funcs) {
		/* RLC autoload sequence 3: load IMU fw */
		if (adev->gfx.imu.funcs->load_microcode)
			adev->gfx.imu.funcs->load_microcode(adev);
	}

	/* unhalt rlc to start autoload */
	data = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPM_THREAD_ENABLE);
	data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD0_ENABLE, 1);
	data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1);
	WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPM_THREAD_ENABLE, data);
	WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);

	return 0;
}

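/* Software init: IRQ sources, RLC/MEC BOs, compute + KIQ rings and MQDs. */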
static int gfx_v12_1_sw_init(struct amdgpu_ip_block *ip_block)
{
	int i, j, k, r, ring_id = 0;
	unsigned num_compute_rings;
	int xcc_id, num_xcc;
	struct amdgpu_device *adev = ip_block->adev;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 1, 0):
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	default:
		adev->gfx.mec.num_mec = 2;
		adev->gfx.mec.num_pipe_per_mec = 2;
		adev->gfx.mec.num_queue_per_pipe = 4;
		break;
	}

	/* recalculate compute rings to use based on hardware configuration */
	num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
			     adev->gfx.mec.num_queue_per_pipe) / 2;
	adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
					  num_compute_rings);

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
			      &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	r = gfx_v12_1_rlc_init(adev);
	if (r) {
		dev_err(adev->dev, "Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v12_1_mec_init(adev);
	if (r) {
		dev_err(adev->dev, "Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		ring_id = 0;
		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
					if (!amdgpu_gfx_is_mec_queue_enabled(adev,
								xcc_id, i, k, j))
						continue;

					r = gfx_v12_1_compute_ring_init(adev, ring_id,
								xcc_id, i, k, j);
					if (r)
						return r;

					ring_id++;
				}
			}
		}

		if (!adev->enable_mes_kiq) {
			r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, xcc_id);
			if (r) {
				dev_err(adev->dev, "Failed to init KIQ BOs!\n");
				return r;
			}

			r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
			if (r)
				return r;
		}

		r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v12_1_compute_mqd), xcc_id);
		if (r)
			return r;
	}

	/* allocate visible FB for rlc auto-loading fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		r = gfx_v12_1_rlc_autoload_buffer_init(adev);
		if (r)
			return r;
	}

	r = gfx_v12_1_gpu_early_init(adev);
	if (r)
		return r;

	return 0;
}

static void gfx_v12_1_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
			&adev->gfx.rlc.rlc_autoload_gpu_addr,
			(void **)&adev->gfx.rlc.rlc_autoload_ptr);
}

static int gfx_v12_1_sw_fini(struct amdgpu_ip_block *ip_block)
{
	int i, num_xcc;
	struct amdgpu_device *adev = ip_block->adev;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	for (i = 0; i < num_xcc; i++) {
		amdgpu_gfx_mqd_sw_fini(adev, i);

		if (!adev->enable_mes_kiq) {
			amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
			amdgpu_gfx_kiq_fini(adev, i);
		}
	}

	gfx_v12_1_rlc_fini(adev);
	gfx_v12_1_mec_fini(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		gfx_v12_1_rlc_autoload_buffer_fini(adev);

	gfx_v12_1_free_microcode(adev);

	return 0;
}

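/*
 * Program GRBM_GFX_INDEX on the given XCC so subsequent register accesses
 * target a specific SE/SA/instance; 0xffffffff selects broadcast for that
 * field.
 */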
static void gfx_v12_1_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				       u32 sh_num, u32 instance, int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
				     instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);

	WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
}

static u32 gfx_v12_1_get_sa_active_bitmap(struct amdgpu_device *adev,
					  int xcc_id)
{
	u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;

	gc_disabled_sa_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SA_UNIT_DISABLE);
	gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
					    CC_GC_SA_UNIT_DISABLE,
					    SA_DISABLE);
	gc_user_disabled_sa_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SA_UNIT_DISABLE);
	gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
						 GC_USER_SA_UNIT_DISABLE,
						 SA_DISABLE);
	sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
					    adev->gfx.config.max_shader_engines);

	return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
}

static u32 gfx_v12_1_get_rb_active_bitmap(struct amdgpu_device *adev,
					  int xcc_id)
{
	u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
	u32 rb_mask;

	gc_disabled_rb_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
					   regCC_RB_BACKEND_DISABLE);
	gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
					    CC_RB_BACKEND_DISABLE,
					    BACKEND_DISABLE);
	gc_user_disabled_rb_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
						regGC_USER_RB_BACKEND_DISABLE);
	gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
						 GC_USER_RB_BACKEND_DISABLE,
						 BACKEND_DISABLE);
	rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
					    adev->gfx.config.max_shader_engines);

	return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
}

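/*
 * Derive the active render-backend mask: each active SA contributes
 * max_backends_per_se / max_sh_per_se RBs, qualified by the
 * RB_BACKEND_DISABLE fuse settings read per XCC.
 */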
static void gfx_v12_1_setup_rb(struct amdgpu_device *adev)
{
	u32 rb_bitmap_width_per_sa;
	u32 max_sa;
	u32 active_sa_bitmap;
	u32 global_active_rb_bitmap;
	u32 active_rb_bitmap = 0;
	u32 i;
	int xcc_id;

	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
		/* query sa bitmap from SA_UNIT_DISABLE registers */
		active_sa_bitmap = gfx_v12_1_get_sa_active_bitmap(adev, xcc_id);
		/* query rb bitmap from RB_BACKEND_DISABLE registers */
		global_active_rb_bitmap = gfx_v12_1_get_rb_active_bitmap(adev, xcc_id);

		/* generate active rb bitmap according to active sa bitmap */
		max_sa = adev->gfx.config.max_shader_engines *
			 adev->gfx.config.max_sh_per_se;
		rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se;
		for (i = 0; i < max_sa; i++) {
			if (active_sa_bitmap & (1 << i))
				active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
		}

		active_rb_bitmap |= global_active_rb_bitmap;
	}

	adev->gfx.config.backend_enable_mask = active_rb_bitmap;
	adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}

#define LDS_APP_BASE           0x2000
#define SCRATCH_APP_BASE       0x4

static void gfx_v12_1_xcc_init_compute_vmid(struct amdgpu_device *adev,
					    int xcc_id)
{
	int i;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) |
			(SCRATCH_APP_BASE << SH_MEM_BASES__PRIVATE_BASE__SHIFT);

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc_v1_0_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);

		/* Disable VGPR deallocation instruction for each KFD vmid. */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_DEBUG);
		data = REG_SET_FIELD(data, SQ_DEBUG, DISABLE_VGPR_DEALLOC, 1);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_DEBUG, data);
	}
	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
	mutex_unlock(&adev->srbm_mutex);
}

static void gfx_v12_1_tcp_harvest(struct amdgpu_device *adev)
{
	/* TODO: harvest feature to be added later. */
}

static void gfx_v12_1_get_tcc_info(struct amdgpu_device *adev)
{
}

static void gfx_v12_1_xcc_constants_init(struct amdgpu_device *adev,
					 int xcc_id)
{
	u32 tmp;
	int i;

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
		soc_v1_0_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
			     regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		if (i != 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
				(adev->gmc.private_aperture_start >> 48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
				(adev->gmc.shared_aperture_start >> 48));
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, tmp);
		}
	}
	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));

	mutex_unlock(&adev->srbm_mutex);

	gfx_v12_1_xcc_init_compute_vmid(adev, xcc_id);
}

static void gfx_v12_1_constants_init(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	gfx_v12_1_setup_rb(adev);
	gfx_v12_1_get_cu_info(adev, &adev->gfx.cu_info);
	gfx_v12_1_get_tcc_info(adev);
	adev->gfx.config.pa_sc_tile_steering_override = 0;

	for (i = 0; i < num_xcc; i++)
		gfx_v12_1_xcc_constants_init(adev, i);
}

static void gfx_v12_1_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						    bool enable, int xcc_id)
{
	u32 tmp;

	if (amdgpu_sriov_vf(adev))
		return;

	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
			    enable ? 1 : 0);

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
}

static int gfx_v12_1_xcc_init_csb(struct amdgpu_device *adev,
				  int xcc_id)
{
	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CSIB_ADDR_HI,
			adev->gfx.rlc.clear_state_gpu_addr >> 32);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CSIB_ADDR_LO,
			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
		     regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);

	return 0;
}

static void gfx_v12_1_xcc_rlc_stop(struct amdgpu_device *adev,
				   int xcc_id)
{
	u32 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CNTL);

	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CNTL, tmp);
}

static void gfx_v12_1_rlc_stop(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v12_1_xcc_rlc_stop(adev, i);
}

static void gfx_v12_1_xcc_rlc_reset(struct amdgpu_device *adev,
				    int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id),
			      GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id),
			      GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
	udelay(50);
}

static void gfx_v12_1_rlc_reset(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v12_1_xcc_rlc_reset(adev, i);
}

static void gfx_v12_1_xcc_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
						 bool enable, int xcc_id)
{
	uint32_t rlc_pg_cntl;

	rlc_pg_cntl = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_PG_CNTL);

	if (!enable) {
		/* RLC_PG_CNTL[23] = 0 (default)
		 * RLC will wait for handshake acks with SMU
		 * GFXOFF will be enabled
		 * RLC_PG_CNTL[23] = 1
		 * RLC will not issue any message to SMU
		 * hence no handshake between SMU & RLC
		 * GFXOFF will be disabled
		 */
		rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
	} else
		rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_PG_CNTL, rlc_pg_cntl);
}

static void gfx_v12_1_xcc_rlc_start(struct amdgpu_device *adev,
				    int xcc_id)
{
	/* TODO: re-enable the RLC/SMU handshake once SMU and the
	 * gfxoff feature work as expected */
1537 	if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
1538 		gfx_v12_1_xcc_rlc_smu_handshake_cntl(adev, false, xcc_id);
1539 
1540 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL, RLC_ENABLE_F32, 1);
1541 	udelay(50);
1542 }
1543 
1544 static void gfx_v12_1_rlc_start(struct amdgpu_device *adev)
1545 {
1546 	int i, num_xcc;
1547 
1548 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1549 	for (i = 0; i < num_xcc; i++) {
1550 		gfx_v12_1_xcc_rlc_start(adev, i);
1551 	}
1552 }
1553 
1554 static void gfx_v12_1_xcc_rlc_enable_srm(struct amdgpu_device *adev,
1555 					 int xcc_id)
1556 {
1557 	uint32_t tmp;
1558 
1559 	/* enable Save Restore Machine */
1560 	tmp = RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SRM_CNTL));
1561 	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1562 	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
1563 	WREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SRM_CNTL), tmp);
1564 }
1565 
1566 static void gfx_v12_1_xcc_load_rlcg_microcode(struct amdgpu_device *adev,
1567 					      int xcc_id)
1568 {
1569 	const struct rlc_firmware_header_v2_0 *hdr;
1570 	const __le32 *fw_data;
1571 	unsigned i, fw_size;
1572 
1573 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1574 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1575 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1576 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1577 
1578 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
1579 		     RLCG_UCODE_LOADING_START_ADDRESS);
1580 
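	/* stream the ucode through the data port; the load address
	 * auto-increments after each RLC_GPM_UCODE_DATA write
	 */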
1581 	for (i = 0; i < fw_size; i++)
1582 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1583 			     regRLC_GPM_UCODE_DATA,
1584 			     le32_to_cpup(fw_data++));
1585 
1586 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1587 		     regRLC_GPM_UCODE_ADDR,
1588 		     adev->gfx.rlc_fw_version);
1589 }
1590 
1591 static void gfx_v12_1_xcc_load_rlc_iram_dram_microcode(struct amdgpu_device *adev,
1592 						       int xcc_id)
1593 {
1594 	const struct rlc_firmware_header_v2_2 *hdr;
1595 	const __le32 *fw_data;
1596 	unsigned i, fw_size;
1597 	u32 tmp;
1598 
1599 	hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
1600 
1601 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1602 			le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes));
1603 	fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4;
1604 
1605 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_LX6_IRAM_ADDR, 0);
1606 
1607 	for (i = 0; i < fw_size; i++) {
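		/* emulation runs slowly; yield periodically so the
		 * backdoor register writes can keep pace
		 */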
1608 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
1609 			msleep(1);
1610 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1611 			     regRLC_LX6_IRAM_DATA,
1612 			     le32_to_cpup(fw_data++));
1613 	}
1614 
1615 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1616 		     regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
1617 
1618 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1619 			le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes));
1620 	fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4;
1621 
1622 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1623 		     regRLC_LX6_DRAM_ADDR, 0);
1624 	for (i = 0; i < fw_size; i++) {
1625 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
1626 			msleep(1);
1627 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
1628 			     regRLC_LX6_DRAM_DATA,
1629 			     le32_to_cpup(fw_data++));
1630 	}
1631 
	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
		     regRLC_LX6_DRAM_ADDR, adev->gfx.rlc_fw_version);
1634 
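	/* take the LX6 core out of reset (BRESET = 0) with PDEBUG enabled */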
1635 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_LX6_CNTL);
1636 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1);
1637 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0);
1638 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_LX6_CNTL, tmp);
1639 }
1640 
1641 static int gfx_v12_1_xcc_rlc_load_microcode(struct amdgpu_device *adev,
1642 					    int xcc_id)
1643 {
1644 	const struct rlc_firmware_header_v2_0 *hdr;
1645 	uint16_t version_major;
1646 	uint16_t version_minor;
1647 
1648 	if (!adev->gfx.rlc_fw)
1649 		return -EINVAL;
1650 
1651 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1652 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
1653 
1654 	version_major = le16_to_cpu(hdr->header.header_version_major);
1655 	version_minor = le16_to_cpu(hdr->header.header_version_minor);
1656 
1657 	if (version_major == 2) {
1658 		gfx_v12_1_xcc_load_rlcg_microcode(adev, xcc_id);
1659 		if (amdgpu_dpm == 1) {
1660 			if (version_minor >= 2)
1661 				gfx_v12_1_xcc_load_rlc_iram_dram_microcode(adev, xcc_id);
1662 		}
1663 
1664 		return 0;
1665 	}
1666 
1667 	return -EINVAL;
1668 }
1669 
1670 static int gfx_v12_1_xcc_rlc_resume(struct amdgpu_device *adev,
1671 				    int xcc_id)
1672 {
1673 	int r;
1674 
1675 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1676 		gfx_v12_1_xcc_init_csb(adev, xcc_id);
1677 
1678 		if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
1679 			gfx_v12_1_xcc_rlc_enable_srm(adev, xcc_id);
1680 	} else {
1681 		if (amdgpu_sriov_vf(adev)) {
1682 			gfx_v12_1_xcc_init_csb(adev, xcc_id);
1683 			return 0;
1684 		}
1685 
1686 		gfx_v12_1_xcc_rlc_stop(adev, xcc_id);
1687 
1688 		/* disable CG */
1689 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
1690 
1691 		/* disable PG */
1692 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_PG_CNTL, 0);
1693 
1694 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1695 			/* legacy rlc firmware loading */
1696 			r = gfx_v12_1_xcc_rlc_load_microcode(adev, xcc_id);
1697 			if (r)
1698 				return r;
1699 		}
1700 
1701 		gfx_v12_1_xcc_init_csb(adev, xcc_id);
1702 
1703 		gfx_v12_1_xcc_rlc_start(adev, xcc_id);
1704 	}
1705 
1706 	return 0;
1707 }
1708 
1709 static int gfx_v12_1_rlc_resume(struct amdgpu_device *adev)
1710 {
1711 	int r, i, num_xcc;
1712 
1713 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1714 	for (i = 0; i < num_xcc; i++) {
1715 		r = gfx_v12_1_xcc_rlc_resume(adev, i);
1716 		if (r)
1717 			return r;
1718 	}
1719 
1720 	return 0;
1721 }
1722 
1723 static void gfx_v12_1_xcc_config_gfx_rs64(struct amdgpu_device *adev,
1724 					  int xcc_id)
1725 {
1726 	const struct gfx_firmware_header_v2_0 *mec_hdr;
1727 	uint32_t pipe_id, tmp;
1728 
1729 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)
1730 		adev->gfx.mec_fw->data;
1731 
1732 	/* config mec program start addr */
1733 	for (pipe_id = 0; pipe_id < 4; pipe_id++) {
1734 		soc_v1_0_grbm_select(adev, 1, pipe_id, 0, 0, GET_INST(GC, xcc_id));
1735 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START,
1736 					mec_hdr->ucode_start_addr_lo >> 2 |
1737 					mec_hdr->ucode_start_addr_hi << 30);
1738 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START_HI,
1739 					mec_hdr->ucode_start_addr_hi >> 2);
1740 	}
	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1742 
1743 	/* reset mec pipe */
1744 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL);
1745 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
1746 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
1747 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
1748 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
1749 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL, tmp);
1750 
1751 	/* clear mec pipe reset */
1752 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
1753 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
1754 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
1755 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
1756 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL, tmp);
1757 }
1758 
1759 static void gfx_v12_1_config_gfx_rs64(struct amdgpu_device *adev)
1760 {
1761 	int i, num_xcc;
1762 
1763 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1764 
1765 	for (i = 0; i < num_xcc; i++)
1766 		gfx_v12_1_xcc_config_gfx_rs64(adev, i);
1767 }
1768 
1769 static void gfx_v12_1_xcc_set_mec_ucode_start_addr(struct amdgpu_device *adev,
1770 						   int xcc_id)
1771 {
1772 	const struct gfx_firmware_header_v2_0 *cp_hdr;
1773 	unsigned pipe_id;
1774 
1775 	cp_hdr = (const struct gfx_firmware_header_v2_0 *)
1776 		adev->gfx.mec_fw->data;
1777 	mutex_lock(&adev->srbm_mutex);
1778 	for (pipe_id = 0; pipe_id < adev->gfx.mec.num_pipe_per_mec; pipe_id++) {
1779 		soc_v1_0_grbm_select(adev, 1, pipe_id, 0, 0, GET_INST(GC, xcc_id));
1780 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START,
1781 			     cp_hdr->ucode_start_addr_lo >> 2 |
1782 			     cp_hdr->ucode_start_addr_hi << 30);
1783 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START_HI,
1784 			     cp_hdr->ucode_start_addr_hi >> 2);
1785 	}
	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1787 	mutex_unlock(&adev->srbm_mutex);
1788 }
1789 
1790 static int gfx_v12_1_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
1791 {
1792 	uint32_t cp_status;
1793 	uint32_t bootload_status;
1794 	int i, xcc_id;
1795 
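	/* poll until the CP reports idle (CP_STAT == 0) and the RLC
	 * flags bootload complete
	 */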
1796 	for (i = 0; i < adev->usec_timeout; i++) {
1797 		cp_status = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_STAT);
1798 		bootload_status = RREG32_SOC15(GC, GET_INST(GC, 0),
1799 					       regRLC_RLCS_BOOTLOAD_STATUS);
1800 
1801 		if ((cp_status == 0) &&
1802 		    (REG_GET_FIELD(bootload_status,
1803 			RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
1804 			break;
1805 		}
1806 		udelay(1);
1807 		if (amdgpu_emu_mode)
1808 			msleep(10);
1809 	}
1810 
1811 	if (i >= adev->usec_timeout) {
1812 		dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
1813 		return -ETIMEDOUT;
1814 	}
1815 
1816 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1817 		for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++)
1818 			gfx_v12_1_xcc_set_mec_ucode_start_addr(adev, xcc_id);
1819 	}
1820 
1821 	return 0;
1822 }
1823 
1824 static void gfx_v12_1_xcc_cp_compute_enable(struct amdgpu_device *adev,
1825 					    bool enable, int xcc_id)
1826 {
1827 	u32 data;
1828 
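	/* on enable: release all pipe resets, mark the pipes active and
	 * un-halt the MEC; on disable: the inverse, plus an icache invalidate
	 */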
1829 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL);
1830 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE,
1831 						 enable ? 0 : 1);
1832 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET,
1833 						 enable ? 0 : 1);
1834 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET,
1835 						 enable ? 0 : 1);
1836 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET,
1837 						 enable ? 0 : 1);
1838 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET,
1839 						 enable ? 0 : 1);
1840 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE,
1841 						 enable ? 1 : 0);
1842 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE,
						 enable ? 1 : 0);
1844 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE,
1845 						 enable ? 1 : 0);
1846 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE,
1847 						 enable ? 1 : 0);
1848 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT,
1849 						 enable ? 0 : 1);
1850 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL, data);
1851 
1852 	adev->gfx.kiq[xcc_id].ring.sched.ready = enable;
1853 
1854 	udelay(50);
1855 }
1856 
1857 static int gfx_v12_1_xcc_cp_compute_load_microcode_rs64(struct amdgpu_device *adev,
1858 							int xcc_id)
1859 {
1860 	const struct gfx_firmware_header_v2_0 *mec_hdr;
1861 	const __le32 *fw_ucode, *fw_data;
1862 	u32 tmp, fw_ucode_size, fw_data_size;
1863 	u32 i, usec_timeout = 50000; /* Wait for 50 ms */
1864 	u32 *fw_ucode_ptr, *fw_data_ptr;
1865 	int r;
1866 
1867 	if (!adev->gfx.mec_fw)
1868 		return -EINVAL;
1869 
1870 	gfx_v12_1_xcc_cp_compute_enable(adev, false, xcc_id);
1871 
1872 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
1873 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
1874 
1875 	fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data +
1876 				le32_to_cpu(mec_hdr->ucode_offset_bytes));
1877 	fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes);
1878 
1879 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1880 				le32_to_cpu(mec_hdr->data_offset_bytes));
1881 	fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
1882 
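	/* the RS64 MEC image splits into a shared instruction segment and
	 * a data segment that is replicated once per pipe below
	 */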
1883 	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
1884 				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
1885 				      &adev->gfx.mec.mec_fw_obj,
1886 				      &adev->gfx.mec.mec_fw_gpu_addr,
1887 				      (void **)&fw_ucode_ptr);
1888 	if (r) {
1889 		dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
1890 		gfx_v12_1_mec_fini(adev);
1891 		return r;
1892 	}
1893 
1894 	r = amdgpu_bo_create_reserved(adev,
1895 				      ALIGN(fw_data_size, 64 * 1024) *
1896 				      adev->gfx.mec.num_pipe_per_mec,
1897 				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
1898 				      &adev->gfx.mec.mec_fw_data_obj,
1899 				      &adev->gfx.mec.mec_fw_data_gpu_addr,
1900 				      (void **)&fw_data_ptr);
1901 	if (r) {
		dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r);
1903 		gfx_v12_1_mec_fini(adev);
1904 		return r;
1905 	}
1906 
1907 	memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
1908 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
1909 		memcpy(fw_data_ptr + i * ALIGN(fw_data_size, 64 * 1024) / 4, fw_data, fw_data_size);
1910 	}
1911 
1912 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
1913 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
1914 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
1915 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);
1916 
1917 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL);
1918 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
1919 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
1920 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
1921 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp);
1922 
1923 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_BASE_CNTL);
1924 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
1925 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
1926 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_BASE_CNTL, tmp);
1927 
1928 	mutex_lock(&adev->srbm_mutex);
1929 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
1930 		soc_v1_0_grbm_select(adev, 1, i, 0, 0, GET_INST(GC, xcc_id));
1931 
1932 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_MDBASE_LO,
1933 			     lower_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr +
1934 					   i * ALIGN(fw_data_size, 64 * 1024)));
1935 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_MDBASE_HI,
1936 			     upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr +
1937 					   i * ALIGN(fw_data_size, 64 * 1024)));
1938 
1939 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO,
1940 			     lower_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
1941 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI,
1942 			     upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
1943 	}
	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
	mutex_unlock(&adev->srbm_mutex);
1946 
	/* Trigger an invalidation of the L1 data cache */
1948 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_OP_CNTL);
1949 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
1950 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_OP_CNTL, tmp);
1951 
1952 	/* Wait for invalidation complete */
1953 	for (i = 0; i < usec_timeout; i++) {
1954 		tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
				  INVALIDATE_DCACHE_COMPLETE) == 1)
1957 			break;
1958 		udelay(1);
1959 	}
1960 
1961 	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate mec data cache\n");
1963 		return -EINVAL;
1964 	}
1965 
1966 	/* Trigger an invalidation of the L1 instruction caches */
1967 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_OP_CNTL);
1968 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
1969 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_OP_CNTL, tmp);
1970 
1971 	/* Wait for invalidation complete */
1972 	for (i = 0; i < usec_timeout; i++) {
1973 		tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE) == 1)
1976 			break;
1977 		udelay(1);
1978 	}
1979 
1980 	if (i >= usec_timeout) {
1981 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
1982 		return -EINVAL;
1983 	}
1984 
1985 	gfx_v12_1_xcc_set_mec_ucode_start_addr(adev, xcc_id);
1986 
1987 	return 0;
1988 }
1989 
1990 static void gfx_v12_1_xcc_kiq_setting(struct amdgpu_ring *ring,
1991 				      int xcc_id)
1992 {
1993 	uint32_t tmp;
1994 	struct amdgpu_device *adev = ring->adev;
1995 
1996 	/* tell RLC which is KIQ queue */
1997 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
1998 	tmp &= 0xffffff00;
1999 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2000 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
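	/* then set bit 7 (presumably the queue-valid flag) in a follow-up write */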
2001 	tmp |= 0x80;
2002 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
2003 }
2004 
2005 static void gfx_v12_1_xcc_cp_set_doorbell_range(struct amdgpu_device *adev,
2006 						int xcc_id)
2007 {
2008 	/* disable gfx engine doorbell range */
2009 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_DOORBELL_RANGE_LOWER, 0);
2010 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_DOORBELL_RANGE_UPPER, 0);
2011 
2012 	/* set compute engine doorbell range */
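	/* doorbell slots are 64 bits wide, hence the (index * 2) << 2
	 * scaling into the dword-based range registers
	 */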
2013 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_LOWER,
2014 		     ((adev->doorbell_index.kiq +
2015 		       xcc_id * adev->doorbell_index.xcc_doorbell_range) *
2016 		      2) << 2);
2017 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_UPPER,
2018 		     ((adev->doorbell_index.userqueue_end +
2019 		       xcc_id * adev->doorbell_index.xcc_doorbell_range) *
2020 		      2) << 2);
2021 }
2022 
2023 static int gfx_v12_1_compute_mqd_init(struct amdgpu_device *adev, void *m,
2024 				      struct amdgpu_mqd_prop *prop)
2025 {
2026 	struct v12_1_compute_mqd *mqd = m;
2027 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
2028 	uint32_t tmp;
2029 
2030 	mqd->header = 0xC0310800;
2031 	mqd->compute_pipelinestat_enable = 0x00000001;
2032 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2033 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2034 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2035 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2036 	mqd->compute_misc_reserved = 0x00000007;
2037 
2038 	eop_base_addr = prop->eop_gpu_addr >> 8;
2039 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2040 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
2041 
2042 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2043 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_EOP_CONTROL);
2044 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
2045 			(order_base_2(GFX12_MEC_HPD_SIZE / 4) - 1));
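	/* e.g. GFX12_MEC_HPD_SIZE = 2048 bytes = 512 dwords, so EOP_SIZE
	 * is programmed as order_base_2(512) - 1 = 8, i.e. 2^(8+1) = 512
	 */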
2046 
2047 	mqd->cp_hqd_eop_control = tmp;
2048 
2049 	/* enable doorbell? */
2050 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_DOORBELL_CONTROL);
2051 
2052 	if (prop->use_doorbell) {
2053 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2054 				    DOORBELL_OFFSET, prop->doorbell_index);
2055 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2056 				    DOORBELL_EN, 1);
2057 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2058 				    DOORBELL_SOURCE, 0);
2059 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2060 				    DOORBELL_HIT, 0);
2061 	} else {
2062 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2063 				    DOORBELL_EN, 0);
2064 	}
2065 
2066 	mqd->cp_hqd_pq_doorbell_control = tmp;
2067 
2068 	/* disable the queue if it's active */
2069 	mqd->cp_hqd_dequeue_request = 0;
2070 	mqd->cp_hqd_pq_rptr = 0;
2071 	mqd->cp_hqd_pq_wptr_lo = 0;
2072 	mqd->cp_hqd_pq_wptr_hi = 0;
2073 
2074 	/* set the pointer to the MQD */
2075 	mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc;
2076 	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
2077 
2078 	/* set MQD vmid to 0 */
2079 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_MQD_CONTROL);
2080 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
2081 	mqd->cp_mqd_control = tmp;
2082 
	/* set the pointer to the HQD; this is similar to CP_RB0_BASE/_HI */
2084 	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
2085 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2086 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2087 
2088 	/* set up the HQD, this is similar to CP_RB0_CNTL */
2089 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_CONTROL);
2090 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
2091 			    (order_base_2(prop->queue_size / 4) - 1));
2092 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
2093 			    (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
2094 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
2095 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
2096 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
2097 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
2098 	mqd->cp_hqd_pq_control = tmp;
2099 
2100 	/* set the wb address whether it's enabled or not */
2101 	wb_gpu_addr = prop->rptr_gpu_addr;
2102 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2103 	mqd->cp_hqd_pq_rptr_report_addr_hi =
2104 		upper_32_bits(wb_gpu_addr) & 0xffff;
2105 
2106 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2107 	wb_gpu_addr = prop->wptr_gpu_addr;
2108 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2109 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2110 
2111 	tmp = 0;
2112 	/* enable the doorbell if requested */
2113 	if (prop->use_doorbell) {
2114 		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_DOORBELL_CONTROL);
2115 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2116 				DOORBELL_OFFSET, prop->doorbell_index);
2117 
2118 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2119 				    DOORBELL_EN, 1);
2120 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2121 				    DOORBELL_SOURCE, 0);
2122 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2123 				    DOORBELL_HIT, 0);
2124 	}
2125 
2126 	mqd->cp_hqd_pq_doorbell_control = tmp;
2127 
2128 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2129 	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_RPTR);
2130 
2131 	/* set the vmid for the queue */
2132 	mqd->cp_hqd_vmid = 0;
2133 
2134 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PERSISTENT_STATE);
2135 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x63);
2136 	mqd->cp_hqd_persistent_state = tmp;
2137 
2138 	/* set MIN_IB_AVAIL_SIZE */
2139 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_IB_CONTROL);
2140 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 1);
2141 	mqd->cp_hqd_ib_control = tmp;
2142 
2143 	/* set static priority for a compute queue/ring */
2144 	mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority;
2145 	mqd->cp_hqd_queue_priority = prop->hqd_queue_priority;
2146 
2147 	mqd->cp_hqd_active = prop->hqd_active;
2148 
2149 	return 0;
2150 }
2151 
2152 static int gfx_v12_1_xcc_kiq_init_register(struct amdgpu_ring *ring,
2153 					   int xcc_id)
2154 {
2155 	struct amdgpu_device *adev = ring->adev;
2156 	struct v12_1_compute_mqd *mqd = ring->mqd_ptr;
2157 	int j;
2158 
2159 	/* inactivate the queue */
2160 	if (amdgpu_sriov_vf(adev))
2161 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
2162 
2163 	/* disable wptr polling */
2164 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
2165 
2166 	/* write the EOP addr */
2167 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
2168 	       mqd->cp_hqd_eop_base_addr_lo);
2169 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
2170 	       mqd->cp_hqd_eop_base_addr_hi);
2171 
2172 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2173 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
2174 	       mqd->cp_hqd_eop_control);
2175 
2176 	/* enable doorbell? */
2177 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
2178 	       mqd->cp_hqd_pq_doorbell_control);
2179 
2180 	/* disable the queue if it's active */
2181 	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
2182 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
2183 		for (j = 0; j < adev->usec_timeout; j++) {
2184 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
2185 				break;
2186 			udelay(1);
2187 		}
2188 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
2189 		       mqd->cp_hqd_dequeue_request);
2190 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
2191 		       mqd->cp_hqd_pq_rptr);
2192 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
2193 		       mqd->cp_hqd_pq_wptr_lo);
2194 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
2195 		       mqd->cp_hqd_pq_wptr_hi);
2196 	}
2197 
2198 	/* set the pointer to the MQD */
2199 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
2200 	       mqd->cp_mqd_base_addr_lo);
2201 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
2202 	       mqd->cp_mqd_base_addr_hi);
2203 
2204 	/* set MQD vmid to 0 */
2205 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
2206 	       mqd->cp_mqd_control);
2207 
	/* set the pointer to the HQD; this is similar to CP_RB0_BASE/_HI */
2209 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
2210 	       mqd->cp_hqd_pq_base_lo);
2211 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
2212 	       mqd->cp_hqd_pq_base_hi);
2213 
2214 	/* set up the HQD, this is similar to CP_RB0_CNTL */
2215 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
2216 	       mqd->cp_hqd_pq_control);
2217 
2218 	/* set the wb address whether it's enabled or not */
2219 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR,
2220 		mqd->cp_hqd_pq_rptr_report_addr_lo);
2221 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
2222 		mqd->cp_hqd_pq_rptr_report_addr_hi);
2223 
2224 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2225 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR,
2226 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
2227 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
2228 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
2229 
2230 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
2231 	       mqd->cp_hqd_pq_doorbell_control);
2232 
2233 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2234 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
2235 	       mqd->cp_hqd_pq_wptr_lo);
2236 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
2237 	       mqd->cp_hqd_pq_wptr_hi);
2238 
2239 	/* set the vmid for the queue */
2240 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid);
2241 
2242 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE,
2243 	       mqd->cp_hqd_persistent_state);
2244 
2245 	/* activate the queue */
2246 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE,
2247 	       mqd->cp_hqd_active);
2248 
2249 	if (ring->use_doorbell)
2250 		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2251 
2252 	return 0;
2253 }
2254 
2255 static int gfx_v12_1_xcc_kiq_init_queue(struct amdgpu_ring *ring,
2256 					int xcc_id)
2257 {
2258 	struct amdgpu_device *adev = ring->adev;
2259 	struct v12_1_compute_mqd *mqd = ring->mqd_ptr;
2260 
2261 	gfx_v12_1_xcc_kiq_setting(ring, xcc_id);
2262 
2263 	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
2264 		/* reset MQD to a clean status */
2265 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2266 			memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(*mqd));
2267 
2268 		/* reset ring buffer */
2269 		ring->wptr = 0;
2270 		amdgpu_ring_clear_ring(ring);
2271 
2272 		mutex_lock(&adev->srbm_mutex);
2273 		soc_v1_0_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2274 		gfx_v12_1_xcc_kiq_init_register(ring, xcc_id);
2275 		soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2276 		mutex_unlock(&adev->srbm_mutex);
2277 	} else {
2278 		memset((void *)mqd, 0, sizeof(*mqd));
2279 		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
2280 			amdgpu_ring_clear_ring(ring);
2281 		mutex_lock(&adev->srbm_mutex);
2282 		soc_v1_0_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2283 		amdgpu_ring_init_mqd(ring);
2284 		gfx_v12_1_xcc_kiq_init_register(ring, xcc_id);
2285 		soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2286 		mutex_unlock(&adev->srbm_mutex);
2287 
2288 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2289 			memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(*mqd));
2290 	}
2291 
2292 	return 0;
2293 }
2294 
2295 static int gfx_v12_1_xcc_kcq_init_queue(struct amdgpu_ring *ring,
2296 					int xcc_id)
2297 {
2298 	struct amdgpu_device *adev = ring->adev;
2299 	struct v12_1_compute_mqd *mqd = ring->mqd_ptr;
2300 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
2301 
2302 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2303 		memset((void *)mqd, 0, sizeof(*mqd));
2304 		mutex_lock(&adev->srbm_mutex);
2305 		soc_v1_0_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2306 		amdgpu_ring_init_mqd(ring);
2307 		soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2308 		mutex_unlock(&adev->srbm_mutex);
2309 
2310 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2311 			memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
2312 	} else {
2313 		/* restore MQD to a clean status */
2314 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2315 			memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
2316 		/* reset ring buffer */
2317 		ring->wptr = 0;
2318 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
2319 		amdgpu_ring_clear_ring(ring);
2320 	}
2321 
2322 	return 0;
2323 }
2324 
2325 static int gfx_v12_1_xcc_kiq_resume(struct amdgpu_device *adev,
2326 				    int xcc_id)
2327 {
2328 	struct amdgpu_ring *ring;
2329 	int r;
2330 
2331 	ring = &adev->gfx.kiq[xcc_id].ring;
2332 
2333 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
2334 	if (unlikely(r != 0))
2335 		return r;
2336 
2337 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2338 	if (unlikely(r != 0)) {
2339 		amdgpu_bo_unreserve(ring->mqd_obj);
2340 		return r;
2341 	}
2342 
2343 	gfx_v12_1_xcc_kiq_init_queue(ring, xcc_id);
2344 	amdgpu_bo_kunmap(ring->mqd_obj);
2345 	ring->mqd_ptr = NULL;
2346 	amdgpu_bo_unreserve(ring->mqd_obj);
2347 	ring->sched.ready = true;
2348 	return 0;
2349 }
2350 
2351 static int gfx_v12_1_xcc_kcq_resume(struct amdgpu_device *adev,
2352 				    int xcc_id)
2353 {
2354 	struct amdgpu_ring *ring = NULL;
2355 	int r = 0, i;
2356 
2357 	if (!amdgpu_async_gfx_ring)
2358 		gfx_v12_1_xcc_cp_compute_enable(adev, true, xcc_id);
2359 
2360 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2361 		ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
2362 
2363 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
2364 		if (unlikely(r != 0))
2365 			goto done;
2366 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2367 		if (!r) {
2368 			r = gfx_v12_1_xcc_kcq_init_queue(ring, xcc_id);
2369 			amdgpu_bo_kunmap(ring->mqd_obj);
2370 			ring->mqd_ptr = NULL;
2371 		}
2372 		amdgpu_bo_unreserve(ring->mqd_obj);
2373 		if (r)
2374 			goto done;
2375 	}
2376 
2377 	r = amdgpu_gfx_enable_kcq(adev, xcc_id);
2378 done:
2379 	return r;
2380 }
2381 
2382 static int gfx_v12_1_xcc_cp_resume(struct amdgpu_device *adev,
2383 				   int xcc_id)
2384 {
2385 	int r, i;
2386 	struct amdgpu_ring *ring;
2387 
2388 	if (!(adev->flags & AMD_IS_APU))
2389 		gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
2390 
2391 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
2392 		/* legacy firmware loading */
2393 		r = gfx_v12_1_xcc_cp_compute_load_microcode_rs64(adev, xcc_id);
2394 		if (r)
2395 			return r;
2396 	}
2397 
2398 	gfx_v12_1_xcc_cp_set_doorbell_range(adev, xcc_id);
2399 
2400 	gfx_v12_1_xcc_cp_compute_enable(adev, true, xcc_id);
2401 
2402 	if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
2403 		r = amdgpu_mes_kiq_hw_init(adev, xcc_id);
2404 	else
2405 		r = gfx_v12_1_xcc_kiq_resume(adev, xcc_id);
2406 	if (r)
2407 		return r;
2408 
2409 	r = gfx_v12_1_xcc_kcq_resume(adev, xcc_id);
2410 	if (r)
2411 		return r;
2412 
2413 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2414 		ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
2415 		r = amdgpu_ring_test_helper(ring);
2416 		if (r)
2417 			return r;
2418 	}
2419 
2420 	return 0;
2421 }
2422 
2423 static int gfx_v12_1_cp_resume(struct amdgpu_device *adev)
2424 {
2425 	int i, r, num_xcc;
2426 
2427 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2428 
2429 	for (i = 0; i < num_xcc; i++) {
2430 		r = gfx_v12_1_xcc_cp_resume(adev, i);
2431 		if (r)
2432 			return r;
2433 	}
2434 
2435 	return 0;
2436 }
2437 
2438 static int gfx_v12_1_gfxhub_enable(struct amdgpu_device *adev)
2439 {
2440 	int r;
2441 	bool value;
2442 
2443 	r = adev->gfxhub.funcs->gart_enable(adev);
2444 	if (r)
2445 		return r;
2446 
	value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
2449 
2450 	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
	/*
	 * TODO: investigate why the TLB flush is needed;
	 * are we missing a flush somewhere else?
	 */
2453 	adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
2454 
2455 	return 0;
2456 }
2457 
2458 static int get_gb_addr_config(struct amdgpu_device *adev)
2459 {
2460 	u32 gb_addr_config;
2461 
2462 	gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG_READ);
2463 	if (gb_addr_config == 0)
2464 		return -EINVAL;
2465 
2466 	adev->gfx.config.gb_addr_config_fields.num_pkrs =
2467 		1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG_READ, NUM_PKRS);
2468 
2469 	adev->gfx.config.gb_addr_config = gb_addr_config;
2470 
2471 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
2472 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2473 				      GB_ADDR_CONFIG_READ, NUM_PIPES);
2474 
2475 	adev->gfx.config.max_tile_pipes =
2476 		adev->gfx.config.gb_addr_config_fields.num_pipes;
2477 
2478 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
2479 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2480 				      GB_ADDR_CONFIG_READ, MAX_COMPRESSED_FRAGS);
2481 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
2482 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2483 				      GB_ADDR_CONFIG_READ, NUM_RB_PER_SE);
2484 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
2485 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2486 				      GB_ADDR_CONFIG_READ, NUM_SHADER_ENGINES);
2487 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
2488 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
2489 				      GB_ADDR_CONFIG_READ, PIPE_INTERLEAVE_SIZE));
2490 
2491 	return 0;
2492 }
2493 
2494 static void gfx_v12_1_xcc_disable_gpa_mode(struct amdgpu_device *adev,
2495 					   int xcc_id)
2496 {
2497 	uint32_t data;
2498 
2499 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
2500 	data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
2501 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
2502 
2503 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPG_PSP_DEBUG);
2504 	data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK;
2505 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPG_PSP_DEBUG, data);
2506 }
2507 
2508 static void gfx_v12_1_init_golden_registers(struct amdgpu_device *adev)
2509 {
2510 	uint32_t val;
2511 
2512 	/* Set the TCP UTCL0 register to enable atomics */
2513 	val = RREG32_SOC15(GC, 0, regTCP_UTCL0_CNTL1);
2514 	val = REG_SET_FIELD(val, TCP_UTCL0_CNTL1, ATOMIC_REQUESTER_EN, 0x1);
2515 
2516 	WREG32_SOC15(GC, 0, regTCP_UTCL0_CNTL1, val);
2517 }
2518 
2519 static int gfx_v12_1_hw_init(struct amdgpu_ip_block *ip_block)
2520 {
2521 	int r, i, num_xcc;
2522 	struct amdgpu_device *adev = ip_block->adev;
2523 
2524 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
2525 		/* rlc autoload firmware */
2526 		r = gfx_v12_1_rlc_backdoor_autoload_enable(adev);
2527 		if (r)
2528 			return r;
2529 	} else {
2530 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
2531 			num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2532 
2533 			if (adev->gfx.imu.funcs) {
2534 				if (adev->gfx.imu.funcs->load_microcode)
2535 					adev->gfx.imu.funcs->load_microcode(adev);
2536 			}
2537 
2538 			for (i = 0; i < num_xcc; i++) {
2539 				/* disable gpa mode in backdoor loading */
2540 				gfx_v12_1_xcc_disable_gpa_mode(adev, i);
2541 			}
2542 		}
2543 	}
2544 
2545 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
2546 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2547 		r = gfx_v12_1_wait_for_rlc_autoload_complete(adev);
2548 		if (r) {
2549 			dev_err(adev->dev, "(%d) failed to wait rlc autoload complete\n", r);
2550 			return r;
2551 		}
2552 	}
2553 
2554 	adev->gfx.is_poweron = true;
2555 
2556 	if (get_gb_addr_config(adev))
		DRM_WARN("Invalid gb_addr_config!\n");
2558 
2559 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2560 		gfx_v12_1_config_gfx_rs64(adev);
2561 
2562 	r = gfx_v12_1_gfxhub_enable(adev);
2563 	if (r)
2564 		return r;
2565 
2566 	gfx_v12_1_init_golden_registers(adev);
2567 
2568 	gfx_v12_1_constants_init(adev);
2569 
2570 	if (adev->nbio.funcs->gc_doorbell_init)
2571 		adev->nbio.funcs->gc_doorbell_init(adev);
2572 
2573 	r = gfx_v12_1_rlc_resume(adev);
2574 	if (r)
2575 		return r;
2576 
2577 	/*
2578 	 * init golden registers and rlc resume may override some registers,
2579 	 * reconfig them here
2580 	 */
2581 	gfx_v12_1_tcp_harvest(adev);
2582 
	return gfx_v12_1_cp_resume(adev);
2588 }
2589 
2590 static void gfx_v12_1_xcc_fini(struct amdgpu_device *adev,
2591 			      int xcc_id)
2592 {
2593 	uint32_t tmp;
2594 
2595 	if (!adev->no_hw_access) {
2596 		if (amdgpu_gfx_disable_kcq(adev, xcc_id))
2597 			DRM_ERROR("KCQ disable failed\n");
2598 
2599 		amdgpu_mes_kiq_hw_fini(adev, xcc_id);
2600 	}
2601 
2602 	if (amdgpu_sriov_vf(adev)) {
2603 		/* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
2604 		tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
2605 		tmp &= 0xffffff00;
2606 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
2607 	}
2608 	gfx_v12_1_xcc_cp_compute_enable(adev, false, xcc_id);
2609 	gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
2610 }
2611 
2612 static int gfx_v12_1_hw_fini(struct amdgpu_ip_block *ip_block)
2613 {
2614 	struct amdgpu_device *adev = ip_block->adev;
2615 	int i, num_xcc;
2616 
2617 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
2618 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
2619 
2620 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v12_1_xcc_fini(adev, i);
2624 
2625 	adev->gfxhub.funcs->gart_disable(adev);
2626 
2627 	adev->gfx.is_poweron = false;
2628 
2629 	return 0;
2630 }
2631 
2632 static int gfx_v12_1_suspend(struct amdgpu_ip_block *ip_block)
2633 {
2634 	return gfx_v12_1_hw_fini(ip_block);
2635 }
2636 
2637 static int gfx_v12_1_resume(struct amdgpu_ip_block *ip_block)
2638 {
2639 	return gfx_v12_1_hw_init(ip_block);
2640 }
2641 
2642 static bool gfx_v12_1_is_idle(struct amdgpu_ip_block *ip_block)
2643 {
2644 	struct amdgpu_device *adev = ip_block->adev;
2645 	int i, num_xcc;
2646 
2647 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2648 	for (i = 0; i < num_xcc; i++) {
2649 		if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i),
2650 				regGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE))
2651 			return false;
2652 	}
2653 	return true;
2654 }
2655 
2656 static int gfx_v12_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
2657 {
2658 	unsigned i;
2659 	struct amdgpu_device *adev = ip_block->adev;
2660 
2661 	for (i = 0; i < adev->usec_timeout; i++) {
2662 		if (gfx_v12_1_is_idle(ip_block))
2663 			return 0;
2664 		udelay(1);
2665 	}
2666 	return -ETIMEDOUT;
2667 }
2668 
2669 static uint64_t gfx_v12_1_get_gpu_clock_counter(struct amdgpu_device *adev)
2670 {
2671 	uint64_t clock = 0;
2672 
2673 	if (adev->smuio.funcs &&
2674 	    adev->smuio.funcs->get_gpu_clock_counter)
2675 		clock = adev->smuio.funcs->get_gpu_clock_counter(adev);
2676 	else
2677 		dev_warn(adev->dev, "query gpu clock counter is not supported\n");
2678 
2679 	return clock;
2680 }
2681 
2682 static int gfx_v12_1_early_init(struct amdgpu_ip_block *ip_block)
2683 {
2684 	struct amdgpu_device *adev = ip_block->adev;
2685 
2686 	adev->gfx.funcs = &gfx_v12_1_gfx_funcs;
2687 
2688 	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
2689 					  AMDGPU_MAX_COMPUTE_RINGS);
2690 
2691 	gfx_v12_1_set_kiq_pm4_funcs(adev);
2692 	gfx_v12_1_set_ring_funcs(adev);
2693 	gfx_v12_1_set_irq_funcs(adev);
2694 	gfx_v12_1_set_rlc_funcs(adev);
2695 	gfx_v12_1_set_mqd_funcs(adev);
2696 	gfx_v12_1_set_imu_funcs(adev);
2697 
2698 	gfx_v12_1_init_rlcg_reg_access_ctrl(adev);
2699 
2700 	return gfx_v12_1_init_microcode(adev);
2701 }
2702 
2703 static int gfx_v12_1_late_init(struct amdgpu_ip_block *ip_block)
2704 {
2705 	struct amdgpu_device *adev = ip_block->adev;
2706 	int r;
2707 
2708 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
2709 	if (r)
2710 		return r;
2711 
2712 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
2713 	if (r)
2714 		return r;
2715 
2716 	return 0;
2717 }
2718 
2719 static bool gfx_v12_1_is_rlc_enabled(struct amdgpu_device *adev)
2720 {
2721 	uint32_t rlc_cntl;
2722 
2723 	/* if RLC is not enabled, do nothing */
2724 	rlc_cntl = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
	return REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32) != 0;
2726 }
2727 
2728 static void gfx_v12_1_xcc_set_safe_mode(struct amdgpu_device *adev,
2729 					int xcc_id)
2730 {
2731 	uint32_t data;
2732 	unsigned i;
2733 
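	/* request safe-mode entry; the RLC acks by clearing the CMD field,
	 * which the loop below polls for
	 */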
2734 	data = RLC_SAFE_MODE__CMD_MASK;
2735 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
2736 
2737 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
2738 
2739 	/* wait for RLC_SAFE_MODE */
2740 	for (i = 0; i < adev->usec_timeout; i++) {
2741 		if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2742 						regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
2743 			break;
2744 		udelay(1);
2745 	}
2746 }
2747 
2748 static void gfx_v12_1_xcc_unset_safe_mode(struct amdgpu_device *adev,
2749 					  int xcc_id)
2750 {
2751 	WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2752 		     regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK);
2753 }
2754 
2755 static void gfx_v12_1_update_perf_clk(struct amdgpu_device *adev,
2756 				      bool enable)
2757 {
2758 	int i, num_xcc;
2759 
2760 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2761 	for (i = 0; i < num_xcc; i++)
2762 		gfx_v12_1_xcc_update_perf_clk(adev, enable, i);
2763 }
2764 
2765 static void gfx_v12_1_update_spm_vmid(struct amdgpu_device *adev,
2766 				      int xcc_id,
2767 				      struct amdgpu_ring *ring,
2768 				      unsigned vmid)
2769 {
2770 	u32 reg, data;
2771 
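	/* in SR-IOV one-VF mode, access the register directly rather
	 * than routing through the KIQ
	 */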
2772 	reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL);
2773 	if (amdgpu_sriov_is_pp_one_vf(adev))
2774 		data = RREG32_NO_KIQ(reg);
2775 	else
2776 		data = RREG32(reg);
2777 
2778 	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
2779 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
2780 
2781 	if (amdgpu_sriov_is_pp_one_vf(adev))
2782 		WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL, data);
2783 	else
2784 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL, data);
2785 
	if (ring && amdgpu_sriov_is_pp_one_vf(adev) &&
	    (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
	     ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)) {
		/* reuse the RLC_SPM_MC_CNTL offset computed above */
		amdgpu_ring_emit_wreg(ring, reg, data);
	}
2793 }
2794 
2795 static const struct amdgpu_rlc_funcs gfx_v12_1_rlc_funcs = {
2796 	.is_rlc_enabled = gfx_v12_1_is_rlc_enabled,
2797 	.set_safe_mode = gfx_v12_1_xcc_set_safe_mode,
2798 	.unset_safe_mode = gfx_v12_1_xcc_unset_safe_mode,
2799 	.init = gfx_v12_1_rlc_init,
2800 	.get_csb_size = gfx_v12_1_get_csb_size,
2801 	.get_csb_buffer = gfx_v12_1_get_csb_buffer,
2802 	.resume = gfx_v12_1_rlc_resume,
2803 	.stop = gfx_v12_1_rlc_stop,
2804 	.reset = gfx_v12_1_rlc_reset,
2805 	.start = gfx_v12_1_rlc_start,
2806 	.update_spm_vmid = gfx_v12_1_update_spm_vmid,
2807 };
2808 
2809 #if 0
2810 static void gfx_v12_cntl_power_gating(struct amdgpu_device *adev, bool enable)
2811 {
2812 	/* TODO */
2813 }
2814 
2815 static void gfx_v12_cntl_pg(struct amdgpu_device *adev, bool enable)
2816 {
2817 	/* TODO */
2818 }
2819 #endif
2820 
2821 static int gfx_v12_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
2822 					   enum amd_powergating_state state)
2823 {
2824 	struct amdgpu_device *adev = ip_block->adev;
2825 	bool enable = (state == AMD_PG_STATE_GATE);
2826 
2827 	if (amdgpu_sriov_vf(adev))
2828 		return 0;
2829 
2830 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2831 	case IP_VERSION(12, 1, 0):
2832 		amdgpu_gfx_off_ctrl(adev, enable);
2833 		break;
2834 	default:
2835 		break;
2836 	}
2837 
2838 	return 0;
2839 }
2840 
2841 static void gfx_v12_1_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
2842 							   bool enable, int xcc_id)
2843 {
2844 	uint32_t def, data;
2845 
2846 	if (!(adev->cg_flags &
2847 	      (AMD_CG_SUPPORT_GFX_CGCG |
2848 	      AMD_CG_SUPPORT_GFX_CGLS |
2849 	      AMD_CG_SUPPORT_GFX_3D_CGCG |
2850 	      AMD_CG_SUPPORT_GFX_3D_CGLS)))
2851 		return;
2852 
2853 	if (enable) {
2854 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2855 					  regRLC_CGTT_MGCG_OVERRIDE);
2856 
2857 		/* unset CGCG override */
2858 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
2859 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
2860 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2861 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2862 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG ||
2863 		    adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
2864 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
2865 
2866 		/* update CGCG override bits */
2867 		if (def != data)
2868 			WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2869 				     regRLC_CGTT_MGCG_OVERRIDE, data);
2870 
2871 		/* enable cgcg FSM(0x0000363F) */
2872 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2873 
2874 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
2875 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK;
2876 			data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
2877 				 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
2878 		}
2879 
2880 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
2881 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK;
2882 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
2883 				 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
2884 		}
2885 
2886 		if (def != data)
2887 			WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2888 				     regRLC_CGCG_CGLS_CTRL, data);
2889 
2890 		/* set IDLE_POLL_COUNT(0x00900100) */
2891 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
2892 
2893 		data &= ~CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK;
2894 		data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2895 		data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
2896 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2897 
2898 		if (def != data)
2899 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
2900 
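		/* enable the context busy/empty and gfx-idle interrupts,
		 * presumably so the RLC sees the idle transitions that
		 * drive the CGCG FSM
		 */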
2901 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL);
2902 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
2903 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
2904 		data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
2905 		data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
2906 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL, data);
2907 	} else {
2908 		/* Program RLC_CGCG_CGLS_CTRL */
2909 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2910 
2911 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
2912 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
2913 
2914 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2915 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
2916 
2917 		if (def != data)
2918 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2919 	}
2920 }
2921 
2922 static void gfx_v12_1_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev,
2923 							   bool enable, int xcc_id)
2924 {
	uint32_t data, def;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)))
2927 		return;
2928 
2929 	/* It is disabled by HW by default */
2930 	if (enable) {
2931 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
2932 			/* 1 - RLC_CGTT_MGCG_OVERRIDE */
2933 			def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2934 
2935 			data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2936 				  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2937 				  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
2938 
2939 			if (def != data)
2940 				WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2941 		}
2942 	} else {
2943 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
2944 			def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2945 
2946 			data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2947 				 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2948 				 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
2949 
2950 			if (def != data)
2951 				WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2952 		}
2953 	}
2954 }
2955 
2956 static void gfx_v12_1_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
2957 					       bool enable, int xcc_id)
2958 {
2959 	uint32_t def, data;
2960 
2961 	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
2962 		return;
2963 
2964 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2965 
2966 	if (enable)
2967 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK |
2968 				  RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK);
2969 	else
2970 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK |
2971 				RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK;
2972 
2973 	if (def != data)
2974 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2975 }
2976 
2977 static void gfx_v12_1_xcc_update_sram_fgcg(struct amdgpu_device *adev,
2978 					   bool enable, int xcc_id)
2979 {
2980 	uint32_t def, data;
2981 
2982 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
2983 		return;
2984 
2985 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2986 
2987 	if (enable)
2988 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2989 	else
2990 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2991 
2992 	if (def != data)
2993 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2994 }
2995 
2996 static void gfx_v12_1_xcc_update_perf_clk(struct amdgpu_device *adev,
2997 					  bool enable, int xcc_id)
2998 {
2999 	uint32_t def, data;
3000 
3001 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK))
3002 		return;
3003 
3004 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
3005 
3006 	if (enable)
3007 		data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
3008 	else
3009 		data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
3010 
3011 	if (def != data)
3012 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
3013 }
3014 
3015 static int gfx_v12_1_xcc_update_gfx_clock_gating(struct amdgpu_device *adev,
3016 					     bool enable, int xcc_id)
3017 {
3018 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
3019 
3020 	gfx_v12_1_xcc_update_coarse_grain_clock_gating(adev, enable, xcc_id);
3021 
3022 	gfx_v12_1_xcc_update_medium_grain_clock_gating(adev, enable, xcc_id);
3023 
3024 	gfx_v12_1_xcc_update_repeater_fgcg(adev, enable, xcc_id);
3025 
3026 	gfx_v12_1_xcc_update_sram_fgcg(adev, enable, xcc_id);
3027 
3028 	gfx_v12_1_xcc_update_perf_clk(adev, enable, xcc_id);
3029 
3030 	if (adev->cg_flags &
3031 	    (AMD_CG_SUPPORT_GFX_MGCG |
3032 	     AMD_CG_SUPPORT_GFX_CGLS |
3033 	     AMD_CG_SUPPORT_GFX_CGCG |
3034 	     AMD_CG_SUPPORT_GFX_3D_CGCG |
3035 	     AMD_CG_SUPPORT_GFX_3D_CGLS))
3036 		gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, enable, xcc_id);
3037 
3038 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
3039 
3040 	return 0;
3041 }
3042 
3043 static int gfx_v12_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
3044 					   enum amd_clockgating_state state)
3045 {
3046 	struct amdgpu_device *adev = ip_block->adev;
3047 	int i, num_xcc;
3048 
3049 	if (amdgpu_sriov_vf(adev))
3050 		return 0;
3051 
3052 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3053 	switch (adev->ip_versions[GC_HWIP][0]) {
3054 	case IP_VERSION(12, 1, 0):
3055 		for (i = 0; i < num_xcc; i++)
3056 			gfx_v12_1_xcc_update_gfx_clock_gating(adev,
3057 				  state == AMD_CG_STATE_GATE, i);
3058 		break;
3059 	default:
3060 		break;
3061 	}
3062 
3063 	return 0;
3064 }
3065 
3066 static void gfx_v12_1_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
3067 {
3068 	struct amdgpu_device *adev = ip_block->adev;
3069 	int data;
3070 
3071 	/* AMD_CG_SUPPORT_GFX_MGCG */
3072 	data = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE);
3073 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
3074 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
3075 
3076 	/* AMD_CG_SUPPORT_REPEATER_FGCG */
3077 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK))
3078 		*flags |= AMD_CG_SUPPORT_REPEATER_FGCG;
3079 
3080 	/* AMD_CG_SUPPORT_GFX_FGCG */
3081 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK))
3082 		*flags |= AMD_CG_SUPPORT_GFX_FGCG;
3083 
3084 	/* AMD_CG_SUPPORT_GFX_PERF_CLK */
3085 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK))
3086 		*flags |= AMD_CG_SUPPORT_GFX_PERF_CLK;
3087 
3088 	/* AMD_CG_SUPPORT_GFX_CGCG */
3089 	data = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL);
3090 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
3091 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
3092 
3093 	/* AMD_CG_SUPPORT_GFX_CGLS */
3094 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
3095 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
3096 }
3097 
3098 static u64 gfx_v12_1_ring_get_rptr_compute(struct amdgpu_ring *ring)
3099 {
	/* gfx12 hardware uses a 32-bit rptr */
3101 	return *(uint32_t *)ring->rptr_cpu_addr;
3102 }
3103 
3104 static u64 gfx_v12_1_ring_get_wptr_compute(struct amdgpu_ring *ring)
3105 {
3106 	u64 wptr;
3107 
3108 	/* XXX check if swapping is necessary on BE */
3109 	if (ring->use_doorbell)
3110 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
3111 	else
3112 		BUG();
3113 	return wptr;
3114 }
3115 
3116 static void gfx_v12_1_ring_set_wptr_compute(struct amdgpu_ring *ring)
3117 {
3118 	struct amdgpu_device *adev = ring->adev;
3119 
3120 	/* XXX check if swapping is necessary on BE */
3121 	if (ring->use_doorbell) {
3122 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
3123 			     ring->wptr);
3124 		WDOORBELL64(ring->doorbell_index, ring->wptr);
3125 	} else {
3126 		BUG(); /* only DOORBELL method supported on gfx12 now */
3127 	}
3128 }
3129 
3130 static void gfx_v12_1_ring_emit_ib_compute(struct amdgpu_ring *ring,
3131 					   struct amdgpu_job *job,
3132 					   struct amdgpu_ib *ib,
3133 					   uint32_t flags)
3134 {
3135 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
3136 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
3137 
3138 	/* Currently, there is a high possibility to get wave ID mismatch
3139 	 * between ME and GDS, leading to a hw deadlock, because ME generates
3140 	 * different wave IDs than the GDS expects. This situation happens
3141 	 * randomly when at least 5 compute pipes use GDS ordered append.
3142 	 * The wave IDs generated by ME are also wrong after suspend/resume.
3143 	 * Those are probably bugs somewhere else in the kernel driver.
3144 	 *
3145 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
3146 	 * GDS to 0 for this ring (me/pipe).
3147 	 */
3148 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
3149 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3150 		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
3151 	}
3152 
3153 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3154 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3155 	amdgpu_ring_write(ring,
3156 #ifdef __BIG_ENDIAN
3157 				(2 << 0) |
3158 #endif
3159 				lower_32_bits(ib->gpu_addr));
3160 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3161 	amdgpu_ring_write(ring, control);
3162 }
3163 
3164 static void gfx_v12_1_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
3165 				     u64 seq, unsigned flags)
3166 {
3167 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
3168 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
3169 
3170 	/* RELEASE_MEM - flush caches, send int */
3171 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
3172 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ(1) |
3173 				 PACKET3_RELEASE_MEM_GCR_GLV_WB |
3174 				 PACKET3_RELEASE_MEM_GCR_GL2_WB |
3175 				 PACKET3_RELEASE_MEM_GCR_GL2_SCOPE(2) |
3176 				 PACKET3_RELEASE_MEM_TEMPORAL(3) |
3177 				 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
3178 				 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
3179 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
3180 				 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));
3181 
	/*
	 * The address must be Qword aligned for a 64-bit write and Dword
	 * aligned when only the low 32 bits of data are written (the high
	 * 32 bits are discarded).
	 */
3186 	if (write64bit)
3187 		BUG_ON(addr & 0x7);
3188 	else
3189 		BUG_ON(addr & 0x3);
3190 	amdgpu_ring_write(ring, lower_32_bits(addr));
3191 	amdgpu_ring_write(ring, upper_32_bits(addr));
3192 	amdgpu_ring_write(ring, lower_32_bits(seq));
3193 	amdgpu_ring_write(ring, upper_32_bits(seq));
3194 	amdgpu_ring_write(ring, 0);
3195 }
3196 
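/*
 * Emit a pipeline sync: poll the fence write-back address with
 * WAIT_REG_MEM until the most recently emitted fence value appears.
 */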
3197 static void gfx_v12_1_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
3198 {
3199 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3200 	uint32_t seq = ring->fence_drv.sync_seq;
3201 	uint64_t addr = ring->fence_drv.gpu_addr;
3202 
3203 	gfx_v12_1_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
3204 			       upper_32_bits(addr), seq, 0xffffffff, 4);
3205 }
3206 
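/*
 * Emit an INVALIDATE_TLBS packet; the CP invalidates TLB entries for
 * the given PASID on one or all hubs, depending on all_hub.
 */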
3207 static void gfx_v12_1_ring_invalidate_tlbs(struct amdgpu_ring *ring,
3208 				   uint16_t pasid, uint32_t flush_type,
3209 				   bool all_hub, uint8_t dst_sel)
3210 {
3211 	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
3212 	amdgpu_ring_write(ring,
3213 			  PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) |
3214 			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
3215 			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
3216 			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
3217 }
3218 
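/*
 * Flush the GPU TLB for the given vmid; on gfx rings, also sync PFP to
 * ME so later PFP fetches see the updated page tables.
 */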
3219 static void gfx_v12_1_ring_emit_vm_flush(struct amdgpu_ring *ring,
3220 					 unsigned vmid, uint64_t pd_addr)
3221 {
3222 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
3223 
3224 	/* compute doesn't have PFP */
3225 	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
3226 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
3227 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3228 		amdgpu_ring_write(ring, 0x0);
3229 	}
3230 }
3231 
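/*
 * KIQ fence: WRITE_DATA of the low 32 fence bits to memory (only 32
 * bits of write-back space are allocated per KIQ fence), optionally
 * followed by a CPC_INT_STATUS write to raise the interrupt.
 */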
3232 static void gfx_v12_1_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
3233 					  u64 seq, unsigned int flags)
3234 {
3235 	struct amdgpu_device *adev = ring->adev;
3236 
3237 	/* we only allocate 32bit for each seq wb address */
3238 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
3239 
3240 	/* write fence seq to the "addr" */
3241 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3242 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3243 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
3244 	amdgpu_ring_write(ring, lower_32_bits(addr));
3245 	amdgpu_ring_write(ring, upper_32_bits(addr));
3246 	amdgpu_ring_write(ring, lower_32_bits(seq));
3247 
3248 	if (flags & AMDGPU_FENCE_FLAG_INT) {
3249 		/* set register to trigger INT */
3250 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3251 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3252 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
3253 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS));
3254 		amdgpu_ring_write(ring, 0);
3255 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
3256 	}
3257 }
3258 
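/*
 * Emit a register read: COPY_DATA from the register into the write-back
 * slot at reg_val_offs, with write confirmation.
 */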
3259 static void gfx_v12_1_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
3260 				     uint32_t reg_val_offs)
3261 {
3262 	struct amdgpu_device *adev = ring->adev;
3263 
3264 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
3265 	amdgpu_ring_write(ring, 0 |	/* src: register*/
3266 				(5 << 8) |	/* dst: memory */
3267 				(1 << 20));	/* write confirm */
3268 	amdgpu_ring_write(ring, reg);
3269 	amdgpu_ring_write(ring, 0);
3270 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
3271 				reg_val_offs * 4));
3272 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
3273 				reg_val_offs * 4));
3274 }
3275 
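/*
 * Emit a register write via WRITE_DATA. The KIQ uses the no-increment
 * address encoding; other rings request write confirmation.
 */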
3276 static void gfx_v12_1_ring_emit_wreg(struct amdgpu_ring *ring,
3277 				     uint32_t reg,
3278 				     uint32_t val)
3279 {
3280 	uint32_t cmd = 0;
3281 
3282 	switch (ring->funcs->type) {
3283 	case AMDGPU_RING_TYPE_KIQ:
3284 		cmd = (1 << 16); /* no inc addr */
3285 		break;
3286 	default:
3287 		cmd = WR_CONFIRM;
3288 		break;
3289 	}
3290 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3291 	amdgpu_ring_write(ring, cmd);
3292 	amdgpu_ring_write(ring, reg);
3293 	amdgpu_ring_write(ring, 0);
3294 	amdgpu_ring_write(ring, val);
3295 }
3296 
3297 static void gfx_v12_1_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
3298 					uint32_t val, uint32_t mask)
3299 {
3300 	gfx_v12_1_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
3301 }
3302 
3303 static void gfx_v12_1_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
3304 						   uint32_t reg0, uint32_t reg1,
3305 						   uint32_t ref, uint32_t mask)
3306 {
3307 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3308 
3309 	gfx_v12_1_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
3310 			       ref, mask, 0x20);
3311 }
3312 
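/*
 * Enable or disable the EOP (time stamp and generic0) interrupts for a
 * single MEC pipe on the given XCC instance.
 */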
3313 static void gfx_v12_1_xcc_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
3314 							int me, int pipe,
3315 							enum amdgpu_interrupt_state state,
3316 							int xcc_id)
3317 {
3318 	u32 mec_int_cntl, mec_int_cntl_reg;
3319 
3320 	/*
3321 	 * amdgpu controls only the first MEC. That's why this function only
3322 	 * handles the setting of interrupts for this specific MEC. All other
3323 	 * pipes' interrupts are set by amdkfd.
3324 	 */
3325 
3326 	if (me == 1) {
3327 		switch (pipe) {
3328 		case 0:
3329 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3330 					GC, GET_INST(GC, xcc_id),
3331 					regCP_ME1_PIPE0_INT_CNTL);
3332 			break;
3333 		case 1:
3334 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3335 					GC, GET_INST(GC, xcc_id),
3336 					regCP_ME1_PIPE1_INT_CNTL);
3337 			break;
3338 		case 2:
3339 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3340 					GC, GET_INST(GC, xcc_id),
3341 					regCP_ME1_PIPE2_INT_CNTL);
3342 			break;
3343 		case 3:
3344 			mec_int_cntl_reg = SOC15_REG_OFFSET(
3345 					GC, GET_INST(GC, xcc_id),
3346 					regCP_ME1_PIPE3_INT_CNTL);
3347 			break;
3348 		default:
3349 			DRM_DEBUG("invalid pipe %d\n", pipe);
3350 			return;
3351 		}
3352 	} else {
3353 		DRM_DEBUG("invalid me %d\n", me);
3354 		return;
3355 	}
3356 
3357 	switch (state) {
3358 	case AMDGPU_IRQ_STATE_DISABLE:
3359 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3360 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3361 					     TIME_STAMP_INT_ENABLE, 0);
3362 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3363 					     GENERIC0_INT_ENABLE, 0);
3364 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3365 		break;
3366 	case AMDGPU_IRQ_STATE_ENABLE:
3367 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3368 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3369 					     TIME_STAMP_INT_ENABLE, 1);
3370 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3371 					     GENERIC0_INT_ENABLE, 1);
3372 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3373 		break;
3374 	default:
3375 		break;
3376 	}
3377 }
3378 
3379 static int gfx_v12_1_set_eop_interrupt_state(struct amdgpu_device *adev,
3380 					    struct amdgpu_irq_src *src,
3381 					    unsigned type,
3382 					    enum amdgpu_interrupt_state state)
3383 {
3384 	int i, num_xcc;
3385 
3386 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3387 	for (i = 0; i < num_xcc; i++) {
3388 		switch (type) {
3389 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
3390 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3391 					adev, 1, 0, state, i);
3392 			break;
3393 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
3394 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3395 					adev, 1, 1, state, i);
3396 			break;
3397 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
3398 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3399 					adev, 1, 2, state, i);
3400 			break;
3401 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
3402 			gfx_v12_1_xcc_set_compute_eop_interrupt_state(
3403 					adev, 1, 3, state, i);
3404 			break;
3405 		default:
3406 			break;
3407 		}
3408 	}
3409 
3410 	return 0;
3411 }
3412 
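/*
 * CP EOP interrupt handler: with MES enabled, look the queue up by its
 * MES queue id; otherwise decode me/pipe/queue from ring_id and process
 * the fence of the matching ring.
 */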
3413 static int gfx_v12_1_eop_irq(struct amdgpu_device *adev,
3414 			     struct amdgpu_irq_src *source,
3415 			     struct amdgpu_iv_entry *entry)
3416 {
3417 	int i;
3418 	u8 me_id, pipe_id, queue_id;
3419 	struct amdgpu_ring *ring;
3420 	uint32_t mes_queue_id = entry->src_data[0];
3421 
3422 	DRM_DEBUG("IH: CP EOP\n");
3423 
3424 	if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
3425 		struct amdgpu_mes_queue *queue;
3426 
3427 		mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
3428 
3429 		spin_lock(&adev->mes.queue_id_lock);
3430 		queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
3431 		if (queue) {
3432 			DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
3433 			amdgpu_fence_process(queue->ring);
3434 		}
3435 		spin_unlock(&adev->mes.queue_id_lock);
3436 	} else {
3437 		me_id = (entry->ring_id & 0x0c) >> 2;
3438 		pipe_id = (entry->ring_id & 0x03) >> 0;
3439 		queue_id = (entry->ring_id & 0x70) >> 4;
3440 
3441 		switch (me_id) {
3442 		case 0:
3443 			if (pipe_id == 0)
3444 				amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
3445 			else
3446 				amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
3447 			break;
3448 		case 1:
3449 		case 2:
3450 			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3451 				ring = &adev->gfx.compute_ring[i];
3452 				/* Per-queue interrupt is supported for MEC starting from VI.
3453 				 * The interrupt can only be enabled/disabled per pipe instead
3454 				 * of per queue.
3455 				 */
3456 				if ((ring->me == me_id) &&
3457 				    (ring->pipe == pipe_id) &&
3458 				    (ring->queue == queue_id))
3459 					amdgpu_fence_process(ring);
3460 			}
3461 			break;
3462 		}
3463 	}
3464 
3465 	return 0;
3466 }
3467 
3468 static int gfx_v12_1_set_priv_reg_fault_state(struct amdgpu_device *adev,
3469 					      struct amdgpu_irq_src *source,
3470 					      unsigned type,
3471 					      enum amdgpu_interrupt_state state)
3472 {
3473 	int i, num_xcc;
3474 
3475 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3476 	switch (state) {
3477 	case AMDGPU_IRQ_STATE_DISABLE:
3478 	case AMDGPU_IRQ_STATE_ENABLE:
3479 		for (i = 0; i < num_xcc; i++)
3480 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3481 					      PRIV_REG_INT_ENABLE,
3482 					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3483 		break;
3484 	default:
3485 		break;
3486 	}
3487 
3488 	return 0;
3489 }
3490 
3491 static int gfx_v12_1_set_priv_inst_fault_state(struct amdgpu_device *adev,
3492 					       struct amdgpu_irq_src *source,
3493 					       unsigned type,
3494 					       enum amdgpu_interrupt_state state)
3495 {
3496 	int i, num_xcc;
3497 
3498 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3499 	switch (state) {
3500 	case AMDGPU_IRQ_STATE_DISABLE:
3501 	case AMDGPU_IRQ_STATE_ENABLE:
3502 		for (i = 0; i < num_xcc; i++)
3503 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3504 				       PRIV_INSTR_INT_ENABLE,
3505 				       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3506 		break;
3507 	default:
3508 		break;
3509 	}
3510 
3511 	return 0;
3512 }
3513 
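/*
 * Report a privileged register/instruction fault to the scheduler of
 * the ring identified by me/pipe/queue in the IV entry.
 */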
3514 static void gfx_v12_1_handle_priv_fault(struct amdgpu_device *adev,
3515 					struct amdgpu_iv_entry *entry)
3516 {
3517 	u8 me_id, pipe_id, queue_id;
3518 	struct amdgpu_ring *ring;
3519 	int i;
3520 
3521 	me_id = (entry->ring_id & 0x0c) >> 2;
3522 	pipe_id = (entry->ring_id & 0x03) >> 0;
3523 	queue_id = (entry->ring_id & 0x70) >> 4;
3524 
3525 	switch (me_id) {
3526 	case 0:
3527 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3528 			ring = &adev->gfx.gfx_ring[i];
			/* only 1 gfx queue per pipe is enabled for now */
3530 			if (ring->me == me_id && ring->pipe == pipe_id)
3531 				drm_sched_fault(&ring->sched);
3532 		}
3533 		break;
3534 	case 1:
3535 	case 2:
3536 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3537 			ring = &adev->gfx.compute_ring[i];
3538 			if (ring->me == me_id && ring->pipe == pipe_id &&
3539 			    ring->queue == queue_id)
3540 				drm_sched_fault(&ring->sched);
3541 		}
3542 		break;
3543 	default:
3544 		BUG();
3545 		break;
3546 	}
3547 }
3548 
3549 static int gfx_v12_1_priv_reg_irq(struct amdgpu_device *adev,
3550 				  struct amdgpu_irq_src *source,
3551 				  struct amdgpu_iv_entry *entry)
3552 {
3553 	DRM_ERROR("Illegal register access in command stream\n");
3554 	gfx_v12_1_handle_priv_fault(adev, entry);
3555 	return 0;
3556 }
3557 
3558 static int gfx_v12_1_priv_inst_irq(struct amdgpu_device *adev,
3559 				   struct amdgpu_irq_src *source,
3560 				   struct amdgpu_iv_entry *entry)
3561 {
3562 	DRM_ERROR("Illegal instruction in command stream\n");
3563 	gfx_v12_1_handle_priv_fault(adev, entry);
3564 	return 0;
3565 }
3566 
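/*
 * Emit a full memory sync: ACQUIRE_MEM over the entire address range
 * with a GCR request that writes back and invalidates the GL2 cache and
 * invalidates the vector L0, scalar L0 and instruction caches.
 */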
3567 static void gfx_v12_1_emit_mem_sync(struct amdgpu_ring *ring)
3568 {
3569 	const unsigned int gcr_cntl =
3570 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
3571 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
3572 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
3573 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
3574 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1) |
3575 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_SCOPE(2);
3576 
3577 	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
3578 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
3579 	amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
3580 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
3581 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
3582 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
3583 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
3584 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
3585 	amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
3586 }
3587 
3588 static const struct amd_ip_funcs gfx_v12_1_ip_funcs = {
3589 	.name = "gfx_v12_1",
3590 	.early_init = gfx_v12_1_early_init,
3591 	.late_init = gfx_v12_1_late_init,
3592 	.sw_init = gfx_v12_1_sw_init,
3593 	.sw_fini = gfx_v12_1_sw_fini,
3594 	.hw_init = gfx_v12_1_hw_init,
3595 	.hw_fini = gfx_v12_1_hw_fini,
3596 	.suspend = gfx_v12_1_suspend,
3597 	.resume = gfx_v12_1_resume,
3598 	.is_idle = gfx_v12_1_is_idle,
3599 	.wait_for_idle = gfx_v12_1_wait_for_idle,
3600 	.set_clockgating_state = gfx_v12_1_set_clockgating_state,
3601 	.set_powergating_state = gfx_v12_1_set_powergating_state,
3602 	.get_clockgating_state = gfx_v12_1_get_clockgating_state,
3603 };
3604 
3605 static const struct amdgpu_ring_funcs gfx_v12_1_ring_funcs_compute = {
3606 	.type = AMDGPU_RING_TYPE_COMPUTE,
3607 	.align_mask = 0xff,
3608 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
3609 	.support_64bit_ptrs = true,
3610 	.get_rptr = gfx_v12_1_ring_get_rptr_compute,
3611 	.get_wptr = gfx_v12_1_ring_get_wptr_compute,
3612 	.set_wptr = gfx_v12_1_ring_set_wptr_compute,
3613 	.emit_frame_size =
3614 		7 + /* gfx_v12_1_ring_emit_pipeline_sync */
3615 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
3616 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
3617 		2 + /* gfx_v12_1_ring_emit_vm_flush */
3618 		8 + 8 + 8 + /* gfx_v12_1_ring_emit_fence x3 for user fence, vm fence */
3619 		8, /* gfx_v12_1_emit_mem_sync */
3620 	.emit_ib_size =	7, /* gfx_v12_1_ring_emit_ib_compute */
3621 	.emit_ib = gfx_v12_1_ring_emit_ib_compute,
3622 	.emit_fence = gfx_v12_1_ring_emit_fence,
3623 	.emit_pipeline_sync = gfx_v12_1_ring_emit_pipeline_sync,
3624 	.emit_vm_flush = gfx_v12_1_ring_emit_vm_flush,
3625 	.test_ring = gfx_v12_1_ring_test_ring,
3626 	.test_ib = gfx_v12_1_ring_test_ib,
3627 	.insert_nop = amdgpu_ring_insert_nop,
3628 	.pad_ib = amdgpu_ring_generic_pad_ib,
3629 	.emit_wreg = gfx_v12_1_ring_emit_wreg,
3630 	.emit_reg_wait = gfx_v12_1_ring_emit_reg_wait,
3631 	.emit_reg_write_reg_wait = gfx_v12_1_ring_emit_reg_write_reg_wait,
3632 	.emit_mem_sync = gfx_v12_1_emit_mem_sync,
3633 };
3634 
3635 static const struct amdgpu_ring_funcs gfx_v12_1_ring_funcs_kiq = {
3636 	.type = AMDGPU_RING_TYPE_KIQ,
3637 	.align_mask = 0xff,
3638 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
3639 	.support_64bit_ptrs = true,
3640 	.get_rptr = gfx_v12_1_ring_get_rptr_compute,
3641 	.get_wptr = gfx_v12_1_ring_get_wptr_compute,
3642 	.set_wptr = gfx_v12_1_ring_set_wptr_compute,
3643 	.emit_frame_size =
3644 		7 + /* gfx_v12_1_ring_emit_pipeline_sync */
3645 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
3646 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
3647 		2 + /* gfx_v12_1_ring_emit_vm_flush */
3648 		8 + 8 + 8, /* gfx_v12_1_ring_emit_fence_kiq x3 for user fence, vm fence */
3649 	.emit_ib_size =	7, /* gfx_v12_1_ring_emit_ib_compute */
3650 	.emit_ib = gfx_v12_1_ring_emit_ib_compute,
3651 	.emit_fence = gfx_v12_1_ring_emit_fence_kiq,
3652 	.test_ring = gfx_v12_1_ring_test_ring,
3653 	.test_ib = gfx_v12_1_ring_test_ib,
3654 	.insert_nop = amdgpu_ring_insert_nop,
3655 	.pad_ib = amdgpu_ring_generic_pad_ib,
3656 	.emit_rreg = gfx_v12_1_ring_emit_rreg,
3657 	.emit_wreg = gfx_v12_1_ring_emit_wreg,
3658 	.emit_reg_wait = gfx_v12_1_ring_emit_reg_wait,
3659 	.emit_reg_write_reg_wait = gfx_v12_1_ring_emit_reg_write_reg_wait,
3660 };
3661 
3662 static void gfx_v12_1_set_ring_funcs(struct amdgpu_device *adev)
3663 {
3664 	int i, j, num_xcc;
3665 
3666 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3667 	for (i = 0; i < num_xcc; i++) {
3668 		adev->gfx.kiq[i].ring.funcs = &gfx_v12_1_ring_funcs_kiq;
3669 
3670 		for (j = 0; j < adev->gfx.num_compute_rings; j++)
3671 			adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs =
3672 						&gfx_v12_1_ring_funcs_compute;
3673 	}
3674 }
3675 
3676 static const struct amdgpu_irq_src_funcs gfx_v12_1_eop_irq_funcs = {
3677 	.set = gfx_v12_1_set_eop_interrupt_state,
3678 	.process = gfx_v12_1_eop_irq,
3679 };
3680 
3681 static const struct amdgpu_irq_src_funcs gfx_v12_1_priv_reg_irq_funcs = {
3682 	.set = gfx_v12_1_set_priv_reg_fault_state,
3683 	.process = gfx_v12_1_priv_reg_irq,
3684 };
3685 
3686 static const struct amdgpu_irq_src_funcs gfx_v12_1_priv_inst_irq_funcs = {
3687 	.set = gfx_v12_1_set_priv_inst_fault_state,
3688 	.process = gfx_v12_1_priv_inst_irq,
3689 };
3690 
3691 static void gfx_v12_1_set_irq_funcs(struct amdgpu_device *adev)
3692 {
3693 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
3694 	adev->gfx.eop_irq.funcs = &gfx_v12_1_eop_irq_funcs;
3695 
3696 	adev->gfx.priv_reg_irq.num_types = 1;
3697 	adev->gfx.priv_reg_irq.funcs = &gfx_v12_1_priv_reg_irq_funcs;
3698 
3699 	adev->gfx.priv_inst_irq.num_types = 1;
3700 	adev->gfx.priv_inst_irq.funcs = &gfx_v12_1_priv_inst_irq_funcs;
3701 }
3702 
3703 static void gfx_v12_1_set_imu_funcs(struct amdgpu_device *adev)
3704 {
3705 	if (adev->flags & AMD_IS_APU)
3706 		adev->gfx.imu.mode = MISSION_MODE;
3707 	else
3708 		adev->gfx.imu.mode = DEBUG_MODE;
3709 
3710 	adev->gfx.imu.funcs = &gfx_v12_1_imu_funcs;
3711 }
3712 
3713 static void gfx_v12_1_set_rlc_funcs(struct amdgpu_device *adev)
3714 {
3715 	adev->gfx.rlc.funcs = &gfx_v12_1_rlc_funcs;
3716 }
3717 
3718 static void gfx_v12_1_set_mqd_funcs(struct amdgpu_device *adev)
3719 {
3720 	/* set compute eng mqd */
3721 	adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
3722 		sizeof(struct v12_1_compute_mqd);
3723 	adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
3724 		gfx_v12_1_compute_mqd_init;
3725 }
3726 
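/* Program the user-requested inactive WGP mask for the selected SA. */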
3727 static void gfx_v12_1_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
3728 							  u32 bitmap, int xcc_id)
3729 {
3730 	u32 data;
3731 
3732 	if (!bitmap)
3733 		return;
3734 
3735 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
3736 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
3737 
3738 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
3739 }
3740 
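/*
 * Return the active WGP bitmap for the currently selected SA: OR the
 * fused and user-disabled WGP masks together and invert the result
 * within the theoretical WGP bitmask (one WGP per two CUs).
 */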
3741 static u32 gfx_v12_1_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev,
3742 						  int xcc_id)
3743 {
	u32 data, wgp_bitmask;

	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
3746 	data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);
3747 
3748 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
3749 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
3750 
3751 	wgp_bitmask =
3752 		amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);
3753 
3754 	return (~data) & wgp_bitmask;
3755 }
3756 
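/*
 * Expand the active WGP bitmap into a CU bitmap; each active WGP
 * contributes two adjacent active CUs.
 */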
3757 static u32 gfx_v12_1_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev,
3758 						 int xcc_id)
3759 {
3760 	u32 wgp_idx, wgp_active_bitmap;
3761 	u32 cu_bitmap_per_wgp, cu_active_bitmap;
3762 
3763 	wgp_active_bitmap = gfx_v12_1_get_wgp_active_bitmap_per_sh(adev, xcc_id);
3764 	cu_active_bitmap = 0;
3765 
3766 	for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
		/* one enabled WGP means 2 enabled CUs */
3768 		cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
3769 		if (wgp_active_bitmap & (1 << wgp_idx))
3770 			cu_active_bitmap |= cu_bitmap_per_wgp;
3771 	}
3772 
3773 	return cu_active_bitmap;
3774 }
3775 
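/*
 * Fill cu_info with the active CU bitmaps and total CU count across all
 * XCC instances, SEs and SAs, honoring the parsed CU disable masks.
 */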
3776 static int gfx_v12_1_get_cu_info(struct amdgpu_device *adev,
3777 				 struct amdgpu_cu_info *cu_info)
3778 {
3779 	int i, j, k, counter, xcc_id, active_cu_number = 0;
3780 	u32 mask, bitmap;
3781 	unsigned disable_masks[8 * 2];
3782 
3783 	if (!adev || !cu_info)
3784 		return -EINVAL;
3785 
3786 	amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2);
3787 
3788 	mutex_lock(&adev->grbm_idx_mutex);
3789 	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
3790 		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3791 			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3792 				bitmap = i * adev->gfx.config.max_sh_per_se + j;
3793 				if (!((gfx_v12_1_get_sa_active_bitmap(adev, xcc_id) >> bitmap) & 1))
3794 					continue;
3795 				mask = 1;
3796 				counter = 0;
3797 				gfx_v12_1_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
3798 				if (i < 8 && j < 2)
3799 					gfx_v12_1_set_user_wgp_inactive_bitmap_per_sh(
3800 						adev, disable_masks[i * 2 + j], xcc_id);
3801 				bitmap = gfx_v12_1_get_cu_active_bitmap_per_sh(adev, xcc_id);
3802 
				/*
				 * GFX12 can support more than 4 SEs, while the bitmap
				 * in the cu_info struct is 4x4 and the ioctl interface
				 * struct drm_amdgpu_info_device must stay stable.
				 * So the last two columns of the bitmap store the cu
				 * mask for SEs 4 to 7; the layout of the bitmap is as below:
3809 				 *    SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]}
3810 				 *    SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]}
3811 				 *    SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]}
3812 				 *    SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]}
3813 				 *    SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]}
3814 				 *    SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]}
3815 				 *    SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
3816 				 *    SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
3817 				 */
3818 				cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap;
3819 
3820 				for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
3821 					if (bitmap & mask)
3822 						counter++;
3823 
3824 					mask <<= 1;
3825 				}
3826 				active_cu_number += counter;
3827 			}
3828 		}
3829 		gfx_v12_1_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, xcc_id);
3830 	}
3831 	mutex_unlock(&adev->grbm_idx_mutex);
3832 
3833 	cu_info->number = active_cu_number;
3834 	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
3835 	cu_info->lds_size = 320;
3836 
3837 	return 0;
3838 }
3839 
3840 const struct amdgpu_ip_block_version gfx_v12_1_ip_block = {
3841 	.type = AMD_IP_BLOCK_TYPE_GFX,
3842 	.major = 12,
3843 	.minor = 1,
3844 	.rev = 0,
3845 	.funcs = &gfx_v12_1_ip_funcs,
3846 };
3847 
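/*
 * Per-partition (XCP) resume: reinitialize constants, the RLC (on bare
 * metal only) and the CP for every XCC instance in inst_mask.
 */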
3848 static int gfx_v12_1_xcp_resume(void *handle, uint32_t inst_mask)
3849 {
3850 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3851 	uint32_t tmp_mask;
3852 	int i, r;
3853 
	/* TODO: Initialize golden regs */
3855 	/* gfx_v12_1_init_golden_registers(adev); */
3856 
3857 	tmp_mask = inst_mask;
3858 	for_each_inst(i, tmp_mask)
3859 		gfx_v12_1_xcc_constants_init(adev, i);
3860 
3861 	if (!amdgpu_sriov_vf(adev)) {
3862 		tmp_mask = inst_mask;
3863 		for_each_inst(i, tmp_mask) {
3864 			r = gfx_v12_1_xcc_rlc_resume(adev, i);
3865 			if (r)
3866 				return r;
3867 		}
3868 	}
3869 
3870 	tmp_mask = inst_mask;
3871 	for_each_inst(i, tmp_mask) {
3872 		r = gfx_v12_1_xcc_cp_resume(adev, i);
3873 		if (r)
3874 			return r;
3875 	}
3876 
3877 	return 0;
3878 }
3879 
3880 static int gfx_v12_1_xcp_suspend(void *handle, uint32_t inst_mask)
3881 {
3882 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3883 	int i;
3884 
3885 	for_each_inst(i, inst_mask)
3886 		gfx_v12_1_xcc_fini(adev, i);
3887 
3888 	return 0;
3889 }
3890 
3891 struct amdgpu_xcp_ip_funcs gfx_v12_1_xcp_funcs = {
3892 	.suspend = &gfx_v12_1_xcp_suspend,
	.resume = &gfx_v12_1_xcp_resume,
3894 };
3895