/*
 * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
#include "vcn_v4_0_3.h"
#include "mmsch_v5_0.h"

#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
#include "vcn_v5_0_0.h"
#include "vcn_v5_0_1.h"

#include <drm/drm_drv.h>

static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst,
				   enum amd_powergating_state state);
static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev);

/**
 * vcn_v5_0_1_early_init - set function pointers and load microcode
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
		/* re-use enc ring as unified ring */
		adev->vcn.inst[i].num_enc_rings = 1;

	vcn_v5_0_1_set_unified_ring_funcs(adev);
	vcn_v5_0_1_set_irq_funcs(adev);
	vcn_v5_0_1_set_ras_funcs(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		adev->vcn.inst[i].set_pg_state = vcn_v5_0_1_set_pg_state;

		r = amdgpu_vcn_early_init(adev, i);
		if (r)
			return r;
	}

	return 0;
}

static void vcn_v5_0_1_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
	struct amdgpu_vcn5_fw_shared *fw_shared;

	fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;

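	/*
	 * This init is idempotent: hw_init invokes it again after a reset,
	 * so bail out early if the shared state was already populated.
	 */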
	if (fw_shared->sq.is_enabled)
		return;
	fw_shared->present_flag_0 =
		cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
	fw_shared->sq.is_enabled = 1;

	if (amdgpu_vcnfw_log)
		amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);
}

/**
 * vcn_v5_0_1_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, r, vcn_inst;

	/* VCN UNIFIED TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
	if (r)
		return r;

	/* VCN POISON TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_5_0__SRCID_UVD_POISON, &adev->vcn.inst->ras_poison_irq);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		vcn_inst = GET_INST(VCN, i);

		r = amdgpu_vcn_sw_init(adev, i);
		if (r)
			return r;

		amdgpu_vcn_setup_ucode(adev, i);

		r = amdgpu_vcn_resume(adev, i);
		if (r)
			return r;

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->use_doorbell = true;
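		/*
		 * Each instance gets its own slice of the doorbell page: on
		 * bare metal the unified rings sit 11 doorbells apart, while
		 * under SR-IOV the host assigns a 32-doorbell stride per
		 * instance (both values follow the doorbell layout used for
		 * this IP).
		 */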
		if (!amdgpu_sriov_vf(adev))
			ring->doorbell_index =
				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				11 * vcn_inst;
		else
			ring->doorbell_index =
				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				32 * vcn_inst;

		ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
		sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);

		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
					AMDGPU_RING_PRIO_DEFAULT, &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		vcn_v5_0_1_fw_shared_init(adev, i);
	}

	/* TODO: Add queue reset mask when FW fully supports it */
	adev->vcn.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	vcn_v5_0_0_alloc_ip_dump(adev);

	return amdgpu_vcn_sysfs_reset_mask_init(adev);
}

/**
 * vcn_v5_0_1_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r, idx;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_vcn5_fw_shared *fw_shared;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = 0;
		}

		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(adev, i);
		if (r)
			return r;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_sw_fini(adev, i);
		if (r)
			return r;
	}

	amdgpu_vcn_sysfs_reset_mask_fini(adev);

	kfree(adev->vcn.ip_dump);

	return 0;
}

/**
 * vcn_v5_0_1_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, r, vcn_inst;

	if (amdgpu_sriov_vf(adev)) {
		r = vcn_v5_0_1_start_sriov(adev);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			ring = &adev->vcn.inst[i].ring_enc[0];
			ring->wptr = 0;
			ring->wptr_old = 0;
			vcn_v5_0_1_unified_ring_set_wptr(ring);
			ring->sched.ready = true;
		}
	} else {
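		/*
		 * Bare-metal path: bit 8 of VCN_RRMT_CNTL reports whether
		 * RRMT is active on this part, so cache that as a capability
		 * flag before bringing up the rings.
		 */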
		if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
			adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			vcn_inst = GET_INST(VCN, i);
			ring = &adev->vcn.inst[i].ring_enc[0];

			if (ring->use_doorbell)
				adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
					((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					11 * vcn_inst),
					adev->vcn.inst[i].aid_id);

			/* Re-init fw_shared, if required */
			vcn_v5_0_1_fw_shared_init(adev, i);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * vcn_v5_0_1_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block and mark the ring as not ready any more
 */
static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work);
		if (vinst->cur_state != AMD_PG_STATE_GATE)
			vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
	}

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
		amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);

	return 0;
}

/**
 * vcn_v5_0_1_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v5_0_1_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	r = vcn_v5_0_1_hw_fini(ip_block);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(ip_block->adev, i);
		if (r)
			return r;
	}

	return r;
}

/**
 * vcn_v5_0_1_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		if (amdgpu_in_reset(adev))
			vinst->cur_state = AMD_PG_STATE_GATE;

		r = amdgpu_vcn_resume(ip_block->adev, i);
		if (r)
			return r;
	}

	r = vcn_v5_0_1_hw_init(ip_block);

	return r;
}

/**
 * vcn_v5_0_1_mc_resume - memory controller programming
 *
 * @vinst: VCN instance
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v5_0_1_mc_resume(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t offset, size, vcn_inst;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

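	/*
	 * VCN sees its VCPU memory through three cache windows plus one
	 * non-cache window: window 0 holds the firmware image, window 1 the
	 * stack, window 2 the context, and the non-cache window maps the
	 * fw_shared region. With PSP loading, the firmware lives in the TMR,
	 * so window 0 points there and stack/context follow the BO at
	 * 'offset'.
	 */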
	vcn_inst = GET_INST(VCN, inst);
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0,
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)));
}

/**
 * vcn_v5_0_1_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @vinst: VCN instance
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
					  bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
				 inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
				 inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v5_0_1_disable_clock_gating - disable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Disable clock gating for VCN block
 */
static void vcn_v5_0_1_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
}

/**
 * vcn_v5_0_1_enable_clock_gating - enable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Enable clock gating for VCN block
 */
static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
}

/**
 * vcn_v5_0_1_pause_dpg_mode - VCN pause with dpg mode
 *
 * @vinst: VCN instance
 * @new_state: pause state
 *
 * Pause dpg mode for VCN block
 */
static int vcn_v5_0_1_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				     struct dpg_pause_state *new_state)
{
	struct amdgpu_device *adev = vinst->adev;
	uint32_t reg_data = 0;
	int vcn_inst;

	vcn_inst = GET_INST(VCN, vinst->inst);

	/* pause/unpause if state is changed */
	if (vinst->pause_state.fw_based != new_state->fw_based) {
		DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d %s\n",
			vinst->pause_state.fw_based, new_state->fw_based,
			new_state->fw_based ? "VCN_DPG_STATE__PAUSE" : "VCN_DPG_STATE__UNPAUSE");
		reg_data = RREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			/* pause DPG */
			reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data);

			/* wait for ACK */
			SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
		} else {
			/* unpause DPG, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data);
		}
		vinst->pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

/**
 * vcn_v5_0_1_start_dpg_mode - VCN start with dpg mode
 *
 * @vinst: VCN instance
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
				     bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	volatile struct amdgpu_vcn5_fw_shared *fw_shared =
		adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE};
	int vcn_inst, ret;
	uint32_t tmp;

	vcn_inst = GET_INST(VCN, inst_idx);

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp);

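	/*
	 * With indirect programming the DPG register writes below are only
	 * staged into the instance's scratch SRAM; they take effect once the
	 * buffer is handed to PSP via amdgpu_vcn_psp_update_sram() further
	 * down.
	 */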
	if (indirect) {
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
			(uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
		/* Use the dummy register 0xDEADBEEF to pass the AID selection to PSP FW */
		WREG32_SOC24_DPG_MODE(inst_idx, 0xDEADBEEF,
				adev->vcn.inst[inst_idx].aid_id, 0, true);
	}

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect);

	/* setup regUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);

	vcn_v5_0_1_mc_resume_dpg_mode(vinst, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable LMI MC and UMC channels */
	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect) {
		ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
		if (ret) {
			dev_err(adev->dev, "vcn sram load failed %d\n", ret);
			return ret;
		}
	}

	/* resetting ring, fw should not check RB ring */
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;

	/* Pause dpg */
	vcn_v5_0_1_pause_dpg_mode(vinst, &state);

	ring = &adev->vcn.inst[inst_idx].ring_enc[0];

	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t));

	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);

	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);

	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
	ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
	/* resetting done, fw can check RB ring */
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
		ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
		VCN_RB1_DB_CTRL__EN_MASK);
	/* Read DB_CTRL to flush the write DB_CTRL command. */
	RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);

	return 0;
}

static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev)
{
	int i, vcn_inst;
	struct amdgpu_ring *ring_enc;
	uint64_t cache_addr;
	uint64_t rb_enc_addr;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t offset, cache_size;
	uint32_t tmp, timeout;

	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw;
	uint32_t init_status;
	uint32_t enabled_vcn;

	struct mmsch_v5_0_cmd_direct_write
		direct_wt = { {0} };
	struct mmsch_v5_0_cmd_direct_read_modify_write
		direct_rd_mod_wt = { {0} };
	struct mmsch_v5_0_cmd_end end = { {0} };
	struct mmsch_v5_0_init_header header;

	volatile struct amdgpu_vcn5_fw_shared *fw_shared;
	volatile struct amdgpu_fw_shared_rb_setup *rb_setup;

	direct_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		vcn_inst = GET_INST(VCN, i);

		vcn_v5_0_1_fw_shared_init(adev, vcn_inst);

		memset(&header, 0, sizeof(struct mmsch_v5_0_init_header));
		header.version = MMSCH_VERSION;
		header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2;

		table_loc = (uint32_t *)table->cpu_addr;
		table_loc += header.total_size;

		table_size = 0;
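
		/*
		 * Build the per-instance register programming table: the
		 * MMSCH_V5_0_INSERT_* macros append command packets at
		 * table_loc and advance table_size as a side effect, and
		 * MMSCH firmware later replays the table on behalf of the
		 * guest.
		 */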

		MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);

			MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);

			offset = 0;
			MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
				regUVD_VCPU_CACHE_OFFSET0), 0);
		} else {
			MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = cache_size;
			MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
				regUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_VCPU_CACHE_SIZE0),
			cache_size);

		cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset;
		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(cache_addr));
		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(cache_addr));
		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_VCPU_CACHE_OFFSET1), 0);
		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE);

		cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE;

		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(cache_addr));

		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(cache_addr));

		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_VCPU_CACHE_OFFSET2), 0);

		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE);

		fw_shared = adev->vcn.inst[vcn_inst].fw_shared.cpu_addr;
		rb_setup = &fw_shared->rb_setup;

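		/*
		 * Under SR-IOV the guest cannot program the RB registers
		 * directly; the ring address and size are handed to the VCN
		 * firmware through the shared-memory rb_setup block instead.
		 */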
		ring_enc = &adev->vcn.inst[vcn_inst].ring_enc[0];
		ring_enc->wptr = 0;
		rb_enc_addr = ring_enc->gpu_addr;

		rb_setup->is_rb_enabled_flags |= RB_ENABLED;
		rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
		rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
		rb_setup->rb_size = ring_enc->ring_size / 4;
		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);

		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_VCPU_NONCACHE_SIZE0),
			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
		MMSCH_V5_0_INSERT_END();

		header.vcn0.init_status = 0;
		header.vcn0.table_offset = header.total_size;
		header.vcn0.table_size = table_size;
		header.total_size += table_size;

		/* Send init table to mmsch */
		size = sizeof(struct mmsch_v5_0_init_header);
		table_loc = (uint32_t *)table->cpu_addr;
		memcpy((void *)table_loc, &header, size);

		ctx_addr = table->gpu_addr;
		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));

		tmp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID);
		tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
		tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID, tmp);

		size = header.total_size;
		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_SIZE, size);

		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP, 0);

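		/*
		 * Mailbox handshake with MMSCH: ring the host mailbox with
		 * 0x1 and poll the response register (up to ~1000 us) until
		 * the firmware acknowledges with MMSCH_VF_MAILBOX_RESP__OK.
		 */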
		param = 0x00000001;
		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_HOST, param);
		tmp = 0;
		timeout = 1000;
		resp = 0;
		expected = MMSCH_VF_MAILBOX_RESP__OK;
		while (resp != expected) {
			resp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP);
			if (resp != 0)
				break;

			udelay(10);
			tmp = tmp + 10;
			if (tmp >= timeout) {
				DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
					" waiting for regMMSCH_VF_MAILBOX_RESP "\
					"(expected=0x%08x, readback=0x%08x)\n",
					tmp, expected, resp);
				return -EBUSY;
			}
		}

		enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0;
		init_status = ((struct mmsch_v5_0_init_header *)(table_loc))->vcn0.init_status;
		if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
					&& init_status != MMSCH_VF_ENGINE_STATUS__PASS) {
			DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\
				"status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status);
		}
	}

	return 0;
}

/**
 * vcn_v5_0_1_start - VCN start
 *
 * @vinst: VCN instance
 *
 * Start VCN block
 */
static int vcn_v5_0_1_start(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	volatile struct amdgpu_vcn5_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int j, k, r, vcn_inst;

	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return vcn_v5_0_1_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);

	vcn_inst = GET_INST(VCN, i);

	/* set VCN status busy */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);

	/* enable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
		 UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* disable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* enable LMI MC and UMC channels */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

	/* setup regUVD_LMI_CTRL */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL, tmp |
		     UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

	vcn_v5_0_1_mc_resume(vinst);

	/* VCN global tiling registers */
	WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);

	/* unblock VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	/* release VCPU reset to boot */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
		 ~UVD_VCPU_CNTL__BLK_RST_MASK);

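	/*
	 * Poll for the VCPU boot-complete report (UVD_STATUS bit 1): up to
	 * 10 attempts of 100 polls each, kicking the VCPU block reset
	 * between attempts on real hardware if the firmware never reports
	 * in.
	 */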
	for (j = 0; j < 10; ++j) {
		uint32_t status;

		for (k = 0; k < 100; ++k) {
			status = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
			if (status & 2)
				break;
			mdelay(100);
			if (amdgpu_emu_mode == 1)
				msleep(20);
		}

		if (amdgpu_emu_mode == 1) {
			r = -1;
			if (status & 2) {
				r = 0;
				break;
			}
		} else {
			r = 0;
			if (status & 2)
				break;

			dev_err(adev->dev,
				"VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
			WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
				 UVD_VCPU_CNTL__BLK_RST_MASK,
				 ~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
				 ~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}
	}

	if (r) {
		dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
		 UVD_MASTINT_EN__VCPU_EN_MASK,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* clear the busy bit of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
		 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

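	/*
	 * Program the unified ring: route its doorbell, then set up base and
	 * size with RB1 disabled, zero both ring pointers, and only enable
	 * RB1 once everything is consistent for the firmware.
	 */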
	ring = &adev->vcn.inst[i].ring_enc[0];

	WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
		     ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
		     VCN_RB1_DB_CTRL__EN_MASK);

	/* Read DB_CTRL to flush the write DB_CTRL command. */
	RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);

	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / 4);

	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);

	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
	ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	/* Keep one register read-back to ensure all register writes are
	 * done; otherwise they may introduce race conditions.
	 */
	RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);

	return 0;
}

/**
 * vcn_v5_0_1_stop_dpg_mode - VCN stop with dpg mode
 *
 * @vinst: VCN instance
 *
 * Stop VCN block with dpg mode
 */
static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t tmp;
	int vcn_inst;
	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};

	vcn_inst = GET_INST(VCN, inst_idx);

	/* Unpause dpg */
	vcn_v5_0_1_pause_dpg_mode(vinst, &state);

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* Keep one register read-back to ensure all register writes are
	 * done; otherwise they may introduce race conditions.
	 */
	RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
}

/**
 * vcn_v5_0_1_stop - VCN stop
 *
 * @vinst: VCN instance
 *
 * Stop VCN block
 */
static int vcn_v5_0_1_stop(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	volatile struct amdgpu_vcn5_fw_shared *fw_shared;
	uint32_t tmp;
	int r = 0, vcn_inst;

	vcn_inst = GET_INST(VCN, i);

	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
	fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		vcn_v5_0_1_stop_dpg_mode(vinst);
		return 0;
	}

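	/*
	 * Bare-metal teardown: drain the engine (idle + LMI clean), stall
	 * the UMC channel, block VCPU register access, put the VCPU into
	 * reset with its clock gated, and finally soft-reset the LMI/UMC
	 * paths.
	 */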
	/* wait for vcn idle */
	r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
	if (r)
		return r;

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
	if (r)
		return r;

	/* disable LMI UMC channel */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
	if (r)
		return r;

	/* block VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
		 UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	/* reset VCPU */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
		 UVD_VCPU_CNTL__BLK_RST_MASK,
		 ~UVD_VCPU_CNTL__BLK_RST_MASK);

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
		 ~(UVD_VCPU_CNTL__CLK_EN_MASK));

	/* apply soft reset */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
	tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
	tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

	/* clear status */
	WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);

	/* Keep one register read-back to ensure all register writes are
	 * done; otherwise they may introduce race conditions.
	 */
	RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);

	return 0;
}

/**
 * vcn_v5_0_1_unified_ring_get_rptr - get unified read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified read pointer
 */
static uint64_t vcn_v5_0_1_unified_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR);
}

/**
 * vcn_v5_0_1_unified_ring_get_wptr - get unified write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified write pointer
 */
static uint64_t vcn_v5_0_1_unified_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR);
}

/**
 * vcn_v5_0_1_unified_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR,
				lower_32_bits(ring->wptr));
	}
}

static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v5_0_1_unified_ring_get_rptr,
	.get_wptr = vcn_v5_0_1_unified_ring_get_wptr,
	.set_wptr = vcn_v5_0_1_unified_ring_set_wptr,
	.emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
			   SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
			   4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
			   5 +
			   5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
			   1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v4_0_3_enc_ring_emit_vm_flush,
	.emit_hdp_flush = vcn_v4_0_3_ring_emit_hdp_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_unified_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v5_0_1_set_unified_ring_funcs - set unified ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set unified ring functions
 */
static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev)
{
	int i, vcn_inst;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_1_unified_ring_vm_funcs;
		adev->vcn.inst[i].ring_enc[0].me = i;
		vcn_inst = GET_INST(VCN, i);
		adev->vcn.inst[i].aid_id = vcn_inst / adev->vcn.num_inst_per_aid;
	}
}

/**
 * vcn_v5_0_1_is_idle - check whether the VCN block is idle
 *
 * @ip_block: Pointer to the amdgpu_ip_block structure
 *
 * Check whether the VCN block is idle
 */
static bool vcn_v5_0_1_is_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
		ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) == UVD_STATUS__IDLE);

	return ret;
}

/**
 * vcn_v5_0_1_wait_for_idle - wait for VCN block idle
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Wait for VCN block idle
 */
static int vcn_v5_0_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * vcn_v5_0_1_set_clockgating_state - set VCN block clockgating state
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 * @state: clock gating state
 *
 * Set VCN block clockgating state
 */
static int vcn_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					    enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = state == AMD_CG_STATE_GATE;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		if (enable) {
			if (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v5_0_1_enable_clock_gating(vinst);
		} else {
			vcn_v5_0_1_disable_clock_gating(vinst);
		}
	}

	return 0;
}

static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst,
				   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = vinst->adev;
	int ret = 0;

	/*
	 * For SRIOV, the guest should not control VCN power-gating; MMSCH FW
	 * controls both power-gating and clock-gating, so the guest should
	 * avoid touching CGC and PG.
	 */
	if (amdgpu_sriov_vf(adev)) {
		vinst->cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == vinst->cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v5_0_1_stop(vinst);
	else
		ret = vcn_v5_0_1_start(vinst);

	if (!ret)
		vinst->cur_state = state;

	return ret;
}

/**
 * vcn_v5_0_1_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @entry: interrupt entry from clients and sources
 *
 * Process VCN block interrupt
 */
static int vcn_v5_0_1_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
	struct amdgpu_iv_entry *entry)
{
	uint32_t i, inst;

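	/*
	 * VCN interrupts are reported per AID; translate the IH node id to
	 * a physical AID index, then find the VCN instance living on it.
	 */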
	i = node_id_to_phys_map[entry->node_id];

	DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n");

	for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst)
		if (adev->vcn.inst[inst].aid_id == i)
			break;
	if (inst >= adev->vcn.num_vcn_inst) {
		dev_WARN_ONCE(adev->dev, 1,
				"Interrupt received for unknown VCN instance %d",
				entry->node_id);
		return 0;
	}

	switch (entry->src_id) {
	case VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static int vcn_v5_0_1_set_ras_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v5_0_1_irq_funcs = {
	.process = vcn_v5_0_1_process_interrupt,
};

static const struct amdgpu_irq_src_funcs vcn_v5_0_1_ras_irq_funcs = {
	.set = vcn_v5_0_1_set_ras_interrupt_state,
	.process = amdgpu_vcn_process_poison_irq,
};

/**
 * vcn_v5_0_1_set_irq_funcs - set VCN block interrupt irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set VCN block interrupt irq functions
 */
static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

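	/*
	 * All instances funnel through the first instance's IRQ source;
	 * each instance contributes one interrupt type for its unified
	 * ring.
	 */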
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
		adev->vcn.inst->irq.num_types++;

	adev->vcn.inst->irq.funcs = &vcn_v5_0_1_irq_funcs;

	adev->vcn.inst->ras_poison_irq.num_types = 1;
	adev->vcn.inst->ras_poison_irq.funcs = &vcn_v5_0_1_ras_irq_funcs;
}

static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
	.name = "vcn_v5_0_1",
	.early_init = vcn_v5_0_1_early_init,
	.late_init = NULL,
	.sw_init = vcn_v5_0_1_sw_init,
	.sw_fini = vcn_v5_0_1_sw_fini,
	.hw_init = vcn_v5_0_1_hw_init,
	.hw_fini = vcn_v5_0_1_hw_fini,
	.suspend = vcn_v5_0_1_suspend,
	.resume = vcn_v5_0_1_resume,
	.is_idle = vcn_v5_0_1_is_idle,
	.wait_for_idle = vcn_v5_0_1_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v5_0_1_set_clockgating_state,
	.set_powergating_state = vcn_set_powergating_state,
	.dump_ip_state = vcn_v5_0_0_dump_ip_state,
	.print_ip_state = vcn_v5_0_0_print_ip_state,
};

const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 5,
	.minor = 0,
	.rev = 1,
	.funcs = &vcn_v5_0_1_ip_funcs,
};

static uint32_t vcn_v5_0_1_query_poison_by_instance(struct amdgpu_device *adev,
			uint32_t instance, uint32_t sub_block)
{
	uint32_t poison_stat = 0, reg_value = 0;

	switch (sub_block) {
	case AMDGPU_VCN_V5_0_1_VCPU_VCODEC:
		reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
		break;
	default:
		break;
	}

	if (poison_stat)
		dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
			instance, sub_block);

	return poison_stat;
}

static bool vcn_v5_0_1_query_poison_status(struct amdgpu_device *adev)
{
	uint32_t inst, sub;
	uint32_t poison_stat = 0;

	for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
		for (sub = 0; sub < AMDGPU_VCN_V5_0_1_MAX_SUB_BLOCK; sub++)
			poison_stat +=
			vcn_v5_0_1_query_poison_by_instance(adev, inst, sub);

	return !!poison_stat;
}

static const struct amdgpu_ras_block_hw_ops vcn_v5_0_1_ras_hw_ops = {
	.query_poison_status = vcn_v5_0_1_query_poison_status,
};

static int vcn_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
				      enum aca_smu_type type, void *data)
{
	struct aca_bank_info info;
	u64 misc0;
	int ret;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	misc0 = bank->regs[ACA_REG_IDX_MISC0];
	switch (type) {
	case ACA_SMU_TYPE_UE:
		bank->aca_err_type = ACA_ERROR_TYPE_UE;
		ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
						     1ULL);
		break;
	case ACA_SMU_TYPE_CE:
		bank->aca_err_type = ACA_ERROR_TYPE_CE;
		ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
						     ACA_REG__MISC0__ERRCNT(misc0));
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

/* these error codes are taken from the SMU driver i/f header file */
static int vcn_v5_0_1_err_codes[] = {
	14, 15, /* VCN */
};

static bool vcn_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
					 enum aca_smu_type type, void *data)
{
	u32 instlo;

	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);

	if (instlo != mmSMNAID_AID0_MCA_SMU)
		return false;

	if (aca_bank_check_error_codes(handle->adev, bank,
				       vcn_v5_0_1_err_codes,
				       ARRAY_SIZE(vcn_v5_0_1_err_codes)))
		return false;

	return true;
}

static const struct aca_bank_ops vcn_v5_0_1_aca_bank_ops = {
	.aca_bank_parser = vcn_v5_0_1_aca_bank_parser,
	.aca_bank_is_valid = vcn_v5_0_1_aca_bank_is_valid,
};

static const struct aca_info vcn_v5_0_1_aca_info = {
	.hwip = ACA_HWIP_TYPE_SMU,
	.mask = ACA_ERROR_UE_MASK,
	.bank_ops = &vcn_v5_0_1_aca_bank_ops,
};

static int vcn_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN,
				&vcn_v5_0_1_aca_info, NULL);
	if (r)
		goto late_fini;

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);

	return r;
}

static struct amdgpu_vcn_ras vcn_v5_0_1_ras = {
	.ras_block = {
		.hw_ops = &vcn_v5_0_1_ras_hw_ops,
		.ras_late_init = vcn_v5_0_1_ras_late_init,
	},
};

static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev)
{
	adev->vcn.ras = &vcn_v5_0_1_ras;
}