1 /*
2  * Copyright 2022 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 #include <drm/drm_drv.h>
26 
27 #include "amdgpu.h"
28 #include "amdgpu_vcn.h"
29 #include "amdgpu_pm.h"
30 #include "soc15.h"
31 #include "soc15d.h"
32 #include "soc15_hw_ip.h"
33 #include "vcn_v2_0.h"
34 #include "vcn_v4_0_3.h"
35 #include "mmsch_v4_0_3.h"
36 
37 #include "vcn/vcn_4_0_3_offset.h"
38 #include "vcn/vcn_4_0_3_sh_mask.h"
39 #include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
40 
41 #define mmUVD_DPG_LMA_CTL		regUVD_DPG_LMA_CTL
42 #define mmUVD_DPG_LMA_CTL_BASE_IDX	regUVD_DPG_LMA_CTL_BASE_IDX
43 #define mmUVD_DPG_LMA_DATA		regUVD_DPG_LMA_DATA
44 #define mmUVD_DPG_LMA_DATA_BASE_IDX	regUVD_DPG_LMA_DATA_BASE_IDX
45 
46 #define VCN_VID_SOC_ADDRESS_2_0		0x1fb00
47 #define VCN1_VID_SOC_ADDRESS_3_0	0x48300
48 
49 static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = {
50 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
51 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
52 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
53 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
54 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
55 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
56 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
57 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
58 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
59 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
60 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
61 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
62 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
63 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
64 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
65 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
66 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
67 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
68 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
69 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
70 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
71 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
72 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
73 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
74 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
75 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
76 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
77 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_CONFIG),
78 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_STATUS),
79 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
80 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
81 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
82 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
83 };
84 
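/* Keep only the lower 17 bits of a register offset; used to normalize
 * VCN register offsets emitted in ring packets when RRMT is not enabled.
 */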
85 #define NORMALIZE_VCN_REG_OFFSET(offset) \
86 		(offset & 0x1FFFF)
87 
88 static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev);
89 static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev);
90 static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
91 static int vcn_v4_0_3_set_pg_state(struct amdgpu_vcn_inst *vinst,
92 				   enum amd_powergating_state state);
93 static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
94 				     struct dpg_pause_state *new_state);
95 static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring);
96 static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev);
97 static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev,
98 				  int inst_idx, bool indirect);
99 
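/* Ring packets need normalized register offsets only when the register
 * remapping (RRMT) capability is not enabled.
 */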
100 static inline bool vcn_v4_0_3_normalizn_reqd(struct amdgpu_device *adev)
101 {
102 	return (adev->vcn.caps & AMDGPU_VCN_CAPS(RRMT_ENABLED)) == 0;
103 }
104 
105 /**
106  * vcn_v4_0_3_early_init - set function pointers
107  *
108  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
109  *
110  * Set ring and irq function pointers
111  */
112 static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
113 {
114 	struct amdgpu_device *adev = ip_block->adev;
115 	int i, r;
116 
117 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
118 		/* re-use enc ring as unified ring */
119 		adev->vcn.inst[i].num_enc_rings = 1;
120 
121 	vcn_v4_0_3_set_unified_ring_funcs(adev);
122 	vcn_v4_0_3_set_irq_funcs(adev);
123 	vcn_v4_0_3_set_ras_funcs(adev);
124 
125 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
126 		adev->vcn.inst[i].set_pg_state = vcn_v4_0_3_set_pg_state;
127 
128 		r = amdgpu_vcn_early_init(adev, i);
129 		if (r)
130 			return r;
131 	}
132 
133 	return 0;
134 }
135 
136 static int vcn_v4_0_3_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
137 {
138 	struct amdgpu_vcn4_fw_shared *fw_shared;
139 
140 	fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
141 	fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
142 	fw_shared->sq.is_enabled = 1;
143 
144 	if (amdgpu_vcnfw_log)
145 		amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);
146 
147 	return 0;
148 }
149 
150 /**
151  * vcn_v4_0_3_sw_init - sw init for VCN block
152  *
153  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
154  *
155  * Load firmware and sw initialization
156  * Load firmware and do software initialization
157 static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
158 {
159 	struct amdgpu_device *adev = ip_block->adev;
160 	struct amdgpu_ring *ring;
161 	int i, r, vcn_inst;
162 	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
163 	uint32_t *ptr;
164 
165 	/* VCN UNIFIED TRAP */
166 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
167 		VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
168 	if (r)
169 		return r;
170 
171 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
172 
173 		r = amdgpu_vcn_sw_init(adev, i);
174 		if (r)
175 			return r;
176 
177 		amdgpu_vcn_setup_ucode(adev, i);
178 
179 		r = amdgpu_vcn_resume(adev, i);
180 		if (r)
181 			return r;
182 
183 		vcn_inst = GET_INST(VCN, i);
184 
185 		ring = &adev->vcn.inst[i].ring_enc[0];
186 		ring->use_doorbell = true;
187 
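		/* Doorbell slots are spaced 9 per VCN instance on bare metal and
		 * 32 per instance under SR-IOV, on top of the vcn_ring0_1 doorbell base.
		 */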
188 		if (!amdgpu_sriov_vf(adev))
189 			ring->doorbell_index =
190 				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
191 				9 * vcn_inst;
192 		else
193 			ring->doorbell_index =
194 				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
195 				32 * vcn_inst;
196 
197 		ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
198 		sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
199 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
200 				     AMDGPU_RING_PRIO_DEFAULT,
201 				     &adev->vcn.inst[i].sched_score);
202 		if (r)
203 			return r;
204 
205 		vcn_v4_0_3_fw_shared_init(adev, i);
206 
207 		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
208 			adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;
209 	}
210 
211 	/* TODO: Add queue reset mask when FW fully supports it */
212 	adev->vcn.supported_reset =
213 		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
214 
215 	if (amdgpu_sriov_vf(adev)) {
216 		r = amdgpu_virt_alloc_mm_table(adev);
217 		if (r)
218 			return r;
219 	}
220 
221 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
222 		r = amdgpu_vcn_ras_sw_init(adev);
223 		if (r) {
224 			dev_err(adev->dev, "Failed to initialize vcn ras block!\n");
225 			return r;
226 		}
227 	}
228 
229 	/* Allocate memory for VCN IP Dump buffer */
230 	ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
231 	if (!ptr) {
232 		DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
233 		adev->vcn.ip_dump = NULL;
234 	} else {
235 		adev->vcn.ip_dump = ptr;
236 	}
237 
238 	r = amdgpu_vcn_sysfs_reset_mask_init(adev);
239 	if (r)
240 		return r;
241 
242 	return 0;
243 }
244 
245 /**
246  * vcn_v4_0_3_sw_fini - sw fini for VCN block
247  *
248  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
249  *
250  * Suspend VCN and free up software allocations
251  */
252 static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
253 {
254 	struct amdgpu_device *adev = ip_block->adev;
255 	int i, r, idx;
256 
257 	if (drm_dev_enter(&adev->ddev, &idx)) {
258 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
259 			volatile struct amdgpu_vcn4_fw_shared *fw_shared;
260 
261 			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
262 			fw_shared->present_flag_0 = 0;
263 			fw_shared->sq.is_enabled = cpu_to_le32(false);
264 		}
265 		drm_dev_exit(idx);
266 	}
267 
268 	if (amdgpu_sriov_vf(adev))
269 		amdgpu_virt_free_mm_table(adev);
270 
271 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
272 		r = amdgpu_vcn_suspend(adev, i);
273 		if (r)
274 			return r;
275 	}
276 
277 	amdgpu_vcn_sysfs_reset_mask_fini(adev);
278 
279 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
280 		r = amdgpu_vcn_sw_fini(adev, i);
281 		if (r)
282 			return r;
283 	}
284 
285 	kfree(adev->vcn.ip_dump);
286 
287 	return 0;
288 }
289 
290 /**
291  * vcn_v4_0_3_hw_init - start and test VCN block
292  *
293  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
294  *
295  * Initialize the hardware, boot up the VCPU and do some testing
296  */
297 static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
298 {
299 	struct amdgpu_device *adev = ip_block->adev;
300 	struct amdgpu_ring *ring;
301 	int i, r, vcn_inst;
302 
303 	if (amdgpu_sriov_vf(adev)) {
304 		r = vcn_v4_0_3_start_sriov(adev);
305 		if (r)
306 			return r;
307 
308 		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
309 			ring = &adev->vcn.inst[i].ring_enc[0];
310 			ring->wptr = 0;
311 			ring->wptr_old = 0;
312 			vcn_v4_0_3_unified_ring_set_wptr(ring);
313 			ring->sched.ready = true;
314 		}
315 	} else {
316 		/* This flag is not set for VF; assume RRMT is always disabled there */
317 		if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) &
318 		    0x100)
319 			adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
320 
321 		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
322 			struct amdgpu_vcn4_fw_shared *fw_shared;
323 
324 			vcn_inst = GET_INST(VCN, i);
325 			ring = &adev->vcn.inst[i].ring_enc[0];
326 
327 			if (ring->use_doorbell) {
328 				adev->nbio.funcs->vcn_doorbell_range(
329 					adev, ring->use_doorbell,
330 					(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
331 						9 * vcn_inst,
332 					adev->vcn.inst[i].aid_id);
333 
334 				WREG32_SOC15(
335 					VCN, GET_INST(VCN, ring->me),
336 					regVCN_RB1_DB_CTRL,
337 					ring->doorbell_index
338 							<< VCN_RB1_DB_CTRL__OFFSET__SHIFT |
339 						VCN_RB1_DB_CTRL__EN_MASK);
340 
341 				/* Read DB_CTRL to flush the write DB_CTRL command. */
342 				RREG32_SOC15(
343 					VCN, GET_INST(VCN, ring->me),
344 					regVCN_RB1_DB_CTRL);
345 			}
346 
347 			/* Re-init fw_shared if a RAS fatal error has occurred */
348 			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
349 			if (!fw_shared->sq.is_enabled)
350 				vcn_v4_0_3_fw_shared_init(adev, i);
351 
352 			r = amdgpu_ring_test_helper(ring);
353 			if (r)
354 				return r;
355 		}
356 	}
357 
358 	return r;
359 }
360 
361 /**
362  * vcn_v4_0_3_hw_fini - stop the hardware block
363  *
364  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
365  *
366  * Stop the VCN block and mark the ring as no longer ready
367  */
368 static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
369 {
370 	struct amdgpu_device *adev = ip_block->adev;
371 	int i;
372 
373 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
374 		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
375 
376 		cancel_delayed_work_sync(&vinst->idle_work);
377 
378 		if (vinst->cur_state != AMD_PG_STATE_GATE)
379 			vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
380 	}
381 
382 	return 0;
383 }
384 
385 /**
386  * vcn_v4_0_3_suspend - suspend VCN block
387  *
388  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
389  *
390  * HW fini and suspend VCN block
391  */
392 static int vcn_v4_0_3_suspend(struct amdgpu_ip_block *ip_block)
393 {
394 	struct amdgpu_device *adev = ip_block->adev;
395 	int r, i;
396 
397 	r = vcn_v4_0_3_hw_fini(ip_block);
398 	if (r)
399 		return r;
400 
401 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
402 		r = amdgpu_vcn_suspend(adev, i);
403 		if (r)
404 			return r;
405 	}
406 
407 	return 0;
408 }
409 
410 /**
411  * vcn_v4_0_3_resume - resume VCN block
412  *
413  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
414  *
415  * Resume firmware and hw init VCN block
416  */
417 static int vcn_v4_0_3_resume(struct amdgpu_ip_block *ip_block)
418 {
419 	struct amdgpu_device *adev = ip_block->adev;
420 	int r, i;
421 
422 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
423 		r = amdgpu_vcn_resume(ip_block->adev, i);
424 		if (r)
425 			return r;
426 	}
427 
428 	r = vcn_v4_0_3_hw_init(ip_block);
429 
430 	return r;
431 }
432 
433 /**
434  * vcn_v4_0_3_mc_resume - memory controller programming
435  *
436  * @vinst: VCN instance
437  *
438  * Let the VCN memory controller know its offsets
439  */
440 static void vcn_v4_0_3_mc_resume(struct amdgpu_vcn_inst *vinst)
441 {
442 	struct amdgpu_device *adev = vinst->adev;
443 	int inst_idx = vinst->inst;
444 	uint32_t offset, size, vcn_inst;
445 	const struct common_firmware_header *hdr;
446 
447 	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
448 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
449 
450 	vcn_inst = GET_INST(VCN, inst_idx);
451 	/* cache window 0: fw */
452 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
453 		WREG32_SOC15(
454 			VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
455 			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx]
456 				 .tmr_mc_addr_lo));
457 		WREG32_SOC15(
458 			VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
459 			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx]
460 				 .tmr_mc_addr_hi));
461 		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0);
462 		offset = 0;
463 	} else {
464 		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
465 			     lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
466 		WREG32_SOC15(VCN, vcn_inst,
467 			     regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
468 			     upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
469 		offset = size;
470 		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0,
471 			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
472 	}
473 	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size);
474 
475 	/* cache window 1: stack */
476 	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
477 		     lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
478 	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
479 		     upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
480 	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0);
481 	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1,
482 		     AMDGPU_VCN_STACK_SIZE);
483 
484 	/* cache window 2: context */
485 	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
486 		     lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
487 				   AMDGPU_VCN_STACK_SIZE));
488 	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
489 		     upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
490 				   AMDGPU_VCN_STACK_SIZE));
491 	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0);
492 	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2,
493 		     AMDGPU_VCN_CONTEXT_SIZE);
494 
495 	/* non-cache window */
496 	WREG32_SOC15(
497 		VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
498 		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
499 	WREG32_SOC15(
500 		VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
501 		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
502 	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
503 	WREG32_SOC15(
504 		VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0,
505 		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
506 }
507 
508 /**
509  * vcn_v4_0_3_mc_resume_dpg_mode - memory controller programming for dpg mode
510  *
511  * @vinst: VCN instance
512  * @indirect: indirectly write sram
513  *
514  * Let the VCN memory controller know its offsets with dpg mode
515  */
516 static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
517 					  bool indirect)
518 {
519 	struct amdgpu_device *adev = vinst->adev;
520 	int inst_idx = vinst->inst;
521 	uint32_t offset, size;
522 	const struct common_firmware_header *hdr;
523 
524 	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
525 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
526 
527 	/* cache window 0: fw */
528 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
529 		if (!indirect) {
530 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
531 				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
532 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
533 					inst_idx].tmr_mc_addr_lo), 0, indirect);
534 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
535 				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
536 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
537 					inst_idx].tmr_mc_addr_hi), 0, indirect);
538 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
539 				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
540 		} else {
541 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
542 				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
543 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
544 				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
545 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
546 				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
547 		}
548 		offset = 0;
549 	} else {
550 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
551 			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
552 			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
553 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
554 			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
555 			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
556 		offset = size;
557 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
558 			VCN, 0, regUVD_VCPU_CACHE_OFFSET0),
559 			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
560 	}
561 
562 	if (!indirect)
563 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
564 			VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
565 	else
566 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
567 			VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
568 
569 	/* cache window 1: stack */
570 	if (!indirect) {
571 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
572 			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
573 			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
574 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
575 			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
576 			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
577 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
578 			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
579 	} else {
580 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
581 			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
582 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
583 			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
584 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
585 			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
586 	}
587 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
588 			VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
589 
590 	/* cache window 2: context */
591 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
592 			VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
593 			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
594 				AMDGPU_VCN_STACK_SIZE), 0, indirect);
595 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
596 			VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
597 			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
598 				AMDGPU_VCN_STACK_SIZE), 0, indirect);
599 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
600 			VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
601 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
602 			VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
603 
604 	/* non-cache window */
605 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
606 			VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
607 			lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
608 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
609 			VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
610 			upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
611 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
612 			VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
613 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
614 			VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
615 			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);
616 
617 	/* VCN global tiling registers */
618 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
619 		VCN, 0, regUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
620 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
621 		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
622 }
623 
624 /**
625  * vcn_v4_0_3_disable_clock_gating - disable VCN clock gating
626  *
627  * @vinst: VCN instance
628  *
629  * Disable clock gating for VCN block
630  */
631 static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
632 {
633 	struct amdgpu_device *adev = vinst->adev;
634 	int inst_idx = vinst->inst;
635 	uint32_t data;
636 	int vcn_inst;
637 
638 	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
639 		return;
640 
641 	vcn_inst = GET_INST(VCN, inst_idx);
642 
643 	/* VCN disable CGC */
644 	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
645 	data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
646 	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
647 	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
648 	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);
649 
650 	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE);
651 	data &= ~(UVD_CGC_GATE__SYS_MASK
652 		| UVD_CGC_GATE__MPEG2_MASK
653 		| UVD_CGC_GATE__REGS_MASK
654 		| UVD_CGC_GATE__RBC_MASK
655 		| UVD_CGC_GATE__LMI_MC_MASK
656 		| UVD_CGC_GATE__LMI_UMC_MASK
657 		| UVD_CGC_GATE__MPC_MASK
658 		| UVD_CGC_GATE__LBSI_MASK
659 		| UVD_CGC_GATE__LRBBM_MASK
660 		| UVD_CGC_GATE__WCB_MASK
661 		| UVD_CGC_GATE__VCPU_MASK
662 		| UVD_CGC_GATE__MMSCH_MASK);
663 
664 	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE, data);
665 	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_CGC_GATE, 0, 0xFFFFFFFF);
666 
667 	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
668 	data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK
669 		| UVD_CGC_CTRL__MPEG2_MODE_MASK
670 		| UVD_CGC_CTRL__REGS_MODE_MASK
671 		| UVD_CGC_CTRL__RBC_MODE_MASK
672 		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
673 		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
674 		| UVD_CGC_CTRL__MPC_MODE_MASK
675 		| UVD_CGC_CTRL__LBSI_MODE_MASK
676 		| UVD_CGC_CTRL__LRBBM_MODE_MASK
677 		| UVD_CGC_CTRL__WCB_MODE_MASK
678 		| UVD_CGC_CTRL__VCPU_MODE_MASK
679 		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
680 	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);
681 
682 	data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE);
683 	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
684 		| UVD_SUVD_CGC_GATE__SIT_MASK
685 		| UVD_SUVD_CGC_GATE__SMP_MASK
686 		| UVD_SUVD_CGC_GATE__SCM_MASK
687 		| UVD_SUVD_CGC_GATE__SDB_MASK
688 		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
689 		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
690 		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
691 		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
692 		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
693 		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
694 		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
695 		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
696 		| UVD_SUVD_CGC_GATE__ENT_MASK
697 		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
698 		| UVD_SUVD_CGC_GATE__SITE_MASK
699 		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
700 		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
701 		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
702 		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
703 		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
704 	WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE, data);
705 
706 	data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL);
707 	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
708 		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
709 		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
710 		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
711 		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
712 		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
713 		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
714 		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
715 	WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data);
716 }
717 
718 /**
719  * vcn_v4_0_3_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
720  *
721  * @vinst: VCN instance
722  * @sram_sel: sram select
723  * @indirect: indirectly write sram
724  *
725  * Disable clock gating for VCN block with dpg mode
726  */
727 static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
728 						     uint8_t sram_sel,
729 						     uint8_t indirect)
730 {
731 	struct amdgpu_device *adev = vinst->adev;
732 	int inst_idx = vinst->inst;
733 	uint32_t reg_data = 0;
734 
735 	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
736 		return;
737 
738 	/* enable sw clock gating control */
739 	reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
740 	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
741 	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
742 	reg_data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK |
743 		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
744 		 UVD_CGC_CTRL__REGS_MODE_MASK |
745 		 UVD_CGC_CTRL__RBC_MODE_MASK |
746 		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
747 		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
748 		 UVD_CGC_CTRL__IDCT_MODE_MASK |
749 		 UVD_CGC_CTRL__MPRD_MODE_MASK |
750 		 UVD_CGC_CTRL__MPC_MODE_MASK |
751 		 UVD_CGC_CTRL__LBSI_MODE_MASK |
752 		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
753 		 UVD_CGC_CTRL__WCB_MODE_MASK |
754 		 UVD_CGC_CTRL__VCPU_MODE_MASK);
755 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
756 		VCN, 0, regUVD_CGC_CTRL), reg_data, sram_sel, indirect);
757 
758 	/* turn off clock gating */
759 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
760 		VCN, 0, regUVD_CGC_GATE), 0, sram_sel, indirect);
761 
762 	/* turn on SUVD clock gating */
763 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
764 		VCN, 0, regUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
765 
766 	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
767 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
768 		VCN, 0, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
769 }
770 
771 /**
772  * vcn_v4_0_3_enable_clock_gating - enable VCN clock gating
773  *
774  * @vinst: VCN instance
775  *
776  * Enable clock gating for VCN block
777  */
778 static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
779 {
780 	struct amdgpu_device *adev = vinst->adev;
781 	int inst_idx = vinst->inst;
782 	uint32_t data;
783 	int vcn_inst;
784 
785 	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
786 		return;
787 
788 	vcn_inst = GET_INST(VCN, inst_idx);
789 
790 	/* enable VCN CGC */
791 	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
792 	data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
793 	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
794 	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
795 	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);
796 
797 	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
798 	data |= (UVD_CGC_CTRL__SYS_MODE_MASK
799 		| UVD_CGC_CTRL__MPEG2_MODE_MASK
800 		| UVD_CGC_CTRL__REGS_MODE_MASK
801 		| UVD_CGC_CTRL__RBC_MODE_MASK
802 		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
803 		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
804 		| UVD_CGC_CTRL__MPC_MODE_MASK
805 		| UVD_CGC_CTRL__LBSI_MODE_MASK
806 		| UVD_CGC_CTRL__LRBBM_MODE_MASK
807 		| UVD_CGC_CTRL__WCB_MODE_MASK
808 		| UVD_CGC_CTRL__VCPU_MODE_MASK);
809 	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);
810 
811 	data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL);
812 	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
813 		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
814 		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
815 		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
816 		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
817 		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
818 		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
819 		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
820 	WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data);
821 }
822 
823 /**
824  * vcn_v4_0_3_start_dpg_mode - VCN start with dpg mode
825  *
826  * @vinst: VCN instance
827  * @indirect: indirectly write sram
828  *
829  * Start VCN block with dpg mode
830  */
831 static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
832 				     bool indirect)
833 {
834 	struct amdgpu_device *adev = vinst->adev;
835 	int inst_idx = vinst->inst;
836 	volatile struct amdgpu_vcn4_fw_shared *fw_shared =
837 						adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
838 	struct amdgpu_ring *ring;
839 	int vcn_inst;
840 	uint32_t tmp;
841 
842 	vcn_inst = GET_INST(VCN, inst_idx);
843 	/* disable register anti-hang mechanism */
844 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1,
845 		 ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
846 	/* enable dynamic power gating mode */
847 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS);
848 	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
849 	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
850 	WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp);
851 
852 	if (indirect) {
853 		DRM_DEV_DEBUG(adev->dev, "VCN %d start: on AID %d",
854 			inst_idx, adev->vcn.inst[inst_idx].aid_id);
855 		adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
856 				(uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
857 		/* Use dummy register 0xDEADBEEF to pass the AID selection to PSP FW */
858 		WREG32_SOC15_DPG_MODE(inst_idx, 0xDEADBEEF,
859 			adev->vcn.inst[inst_idx].aid_id, 0, true);
860 	}
861 
862 	/* disable clock gating */
863 	vcn_v4_0_3_disable_clock_gating_dpg_mode(vinst, 0, indirect);
864 
865 	/* enable VCPU clock */
866 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
867 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
868 	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
869 
870 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
871 		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);
872 
873 	/* disable master interrupt */
874 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
875 		VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect);
876 
877 	/* setup regUVD_LMI_CTRL */
878 	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
879 		UVD_LMI_CTRL__REQ_MODE_MASK |
880 		UVD_LMI_CTRL__CRC_RESET_MASK |
881 		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
882 		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
883 		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
884 		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
885 		0x00100000L);
886 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
887 		VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);
888 
889 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
890 		VCN, 0, regUVD_MPC_CNTL),
891 		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
892 
893 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
894 		VCN, 0, regUVD_MPC_SET_MUXA0),
895 		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
896 		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
897 		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
898 		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
899 
900 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
901 		VCN, 0, regUVD_MPC_SET_MUXB0),
902 		 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
903 		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
904 		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
905 		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
906 
907 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
908 		VCN, 0, regUVD_MPC_SET_MUX),
909 		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
910 		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
911 		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
912 
913 	vcn_v4_0_3_mc_resume_dpg_mode(vinst, indirect);
914 
915 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
916 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
917 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
918 		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);
919 
920 	/* enable LMI MC and UMC channels */
921 	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
922 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
923 		VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect);
924 
925 	vcn_v4_0_3_enable_ras(adev, inst_idx, indirect);
926 
927 	/* enable master interrupt */
928 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
929 		VCN, 0, regUVD_MASTINT_EN),
930 		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
931 
932 	if (indirect)
933 		amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
934 
935 	ring = &adev->vcn.inst[inst_idx].ring_enc[0];
936 
937 	/* program the RB_BASE for ring buffer */
938 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
939 		     lower_32_bits(ring->gpu_addr));
940 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
941 		     upper_32_bits(ring->gpu_addr));
942 
943 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
944 		     ring->ring_size / sizeof(uint32_t));
945 
946 	/* resetting ring, fw should not check RB ring */
947 	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
948 	tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
949 	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
950 	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
951 
952 	/* Initialize the ring buffer's read and write pointers */
953 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
954 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
955 	ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
956 
957 	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
958 	tmp |= VCN_RB_ENABLE__RB_EN_MASK;
959 	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
960 	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
961 
962 	/* resetting done, fw can check RB ring */
963 	fw_shared->sq.queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
964 
965 	return 0;
966 }
967 
968 static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
969 {
970 	int i, vcn_inst;
971 	struct amdgpu_ring *ring_enc;
972 	uint64_t cache_addr;
973 	uint64_t rb_enc_addr;
974 	uint64_t ctx_addr;
975 	uint32_t param, resp, expected;
976 	uint32_t offset, cache_size;
977 	uint32_t tmp, timeout;
978 
979 	struct amdgpu_mm_table *table = &adev->virt.mm_table;
980 	uint32_t *table_loc;
981 	uint32_t table_size;
982 	uint32_t size, size_dw;
983 	uint32_t init_status;
984 	uint32_t enabled_vcn;
985 
986 	struct mmsch_v4_0_cmd_direct_write
987 		direct_wt = { {0} };
988 	struct mmsch_v4_0_cmd_direct_read_modify_write
989 		direct_rd_mod_wt = { {0} };
990 	struct mmsch_v4_0_cmd_end end = { {0} };
991 	struct mmsch_v4_0_3_init_header header;
992 
993 	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
994 	volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
995 
996 	direct_wt.cmd_header.command_type =
997 		MMSCH_COMMAND__DIRECT_REG_WRITE;
998 	direct_rd_mod_wt.cmd_header.command_type =
999 		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
1000 	end.cmd_header.command_type = MMSCH_COMMAND__END;
1001 
1002 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1003 		vcn_inst = GET_INST(VCN, i);
1004 
1005 		vcn_v4_0_3_fw_shared_init(adev, vcn_inst);
1006 
1007 		memset(&header, 0, sizeof(struct mmsch_v4_0_3_init_header));
1008 		header.version = MMSCH_VERSION;
1009 		header.total_size = sizeof(struct mmsch_v4_0_3_init_header) >> 2;
1010 
1011 		table_loc = (uint32_t *)table->cpu_addr;
1012 		table_loc += header.total_size;
1013 
1014 		table_size = 0;
1015 
1016 		MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
1017 			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
1018 
1019 		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
1020 
1021 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1022 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
1023 				regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1024 				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
1025 
1026 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
1027 				regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1028 				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
1029 
1030 			offset = 0;
1031 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
1032 				regUVD_VCPU_CACHE_OFFSET0), 0);
1033 		} else {
1034 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
1035 				regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1036 				lower_32_bits(adev->vcn.inst[i].gpu_addr));
1037 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
1038 				regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1039 				upper_32_bits(adev->vcn.inst[i].gpu_addr));
1040 			offset = cache_size;
1041 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
1042 				regUVD_VCPU_CACHE_OFFSET0),
1043 				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
1044 		}
1045 
1046 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
1047 			regUVD_VCPU_CACHE_SIZE0),
1048 			cache_size);
1049 
1050 		cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset;
1051 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
1052 			regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(cache_addr));
1053 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
1054 			regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(cache_addr));
1055 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
1056 			regUVD_VCPU_CACHE_OFFSET1), 0);
1057 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
1058 			regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE);
1059 
1060 		cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset +
1061 			AMDGPU_VCN_STACK_SIZE;
1062 
1063 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
1064 			regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(cache_addr));
1065 
1066 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
1067 			regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(cache_addr));
1068 
1069 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
1070 			regUVD_VCPU_CACHE_OFFSET2), 0);
1071 
1072 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
1073 			regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE);
1074 
1075 		fw_shared = adev->vcn.inst[vcn_inst].fw_shared.cpu_addr;
1076 		rb_setup = &fw_shared->rb_setup;
1077 
1078 		ring_enc = &adev->vcn.inst[vcn_inst].ring_enc[0];
1079 		ring_enc->wptr = 0;
1080 		rb_enc_addr = ring_enc->gpu_addr;
1081 
1082 		rb_setup->is_rb_enabled_flags |= RB_ENABLED;
1083 		rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
1084 		rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
1085 		rb_setup->rb_size = ring_enc->ring_size / 4;
1086 		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
1087 
1088 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
1089 			regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
1090 			lower_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
1091 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
1092 			regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
1093 			upper_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
1094 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
1095 			regUVD_VCPU_NONCACHE_SIZE0),
1096 			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
1097 		MMSCH_V4_0_INSERT_END();
1098 
1099 		header.vcn0.init_status = 0;
1100 		header.vcn0.table_offset = header.total_size;
1101 		header.vcn0.table_size = table_size;
1102 		header.total_size += table_size;
1103 
1104 		/* Send init table to mmsch */
1105 		size = sizeof(struct mmsch_v4_0_3_init_header);
1106 		table_loc = (uint32_t *)table->cpu_addr;
1107 		memcpy((void *)table_loc, &header, size);
1108 
1109 		ctx_addr = table->gpu_addr;
1110 		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
1111 		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
1112 
1113 		tmp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID);
1114 		tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
1115 		tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
1116 		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID, tmp);
1117 
1118 		size = header.total_size;
1119 		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_SIZE, size);
1120 
1121 		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP, 0);
1122 
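		/* Kick the MMSCH through the host mailbox and poll the response
		 * register in 10 us steps, giving up after roughly 1 ms.
		 */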
1123 		param = 0x00000001;
1124 		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_HOST, param);
1125 		tmp = 0;
1126 		timeout = 1000;
1127 		resp = 0;
1128 		expected = MMSCH_VF_MAILBOX_RESP__OK;
1129 		while (resp != expected) {
1130 			resp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP);
1131 			if (resp != 0)
1132 				break;
1133 
1134 			udelay(10);
1135 			tmp = tmp + 10;
1136 			if (tmp >= timeout) {
1137 				DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
1138 					" waiting for regMMSCH_VF_MAILBOX_RESP "\
1139 					"(expected=0x%08x, readback=0x%08x)\n",
1140 					tmp, expected, resp);
1141 				return -EBUSY;
1142 			}
1143 		}
1144 
1145 		enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0;
1146 		init_status = ((struct mmsch_v4_0_3_init_header *)(table_loc))->vcn0.init_status;
1147 		if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
1148 					&& init_status != MMSCH_VF_ENGINE_STATUS__PASS) {
1149 			DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\
1150 				"status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status);
1151 		}
1152 	}
1153 
1154 	return 0;
1155 }
1156 
1157 /**
1158  * vcn_v4_0_3_start - VCN start
1159  *
1160  * @vinst: VCN instance
1161  *
1162  * Start VCN block
1163  */
1164 static int vcn_v4_0_3_start(struct amdgpu_vcn_inst *vinst)
1165 {
1166 	struct amdgpu_device *adev = vinst->adev;
1167 	int i = vinst->inst;
1168 	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
1169 	struct amdgpu_ring *ring;
1170 	int j, k, r, vcn_inst;
1171 	uint32_t tmp;
1172 
1173 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1174 		return vcn_v4_0_3_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
1175 
1176 	vcn_inst = GET_INST(VCN, i);
1177 	/* set VCN status busy */
1178 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) |
1179 		UVD_STATUS__UVD_BUSY;
1180 	WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
1181 
1182 	/* SW clock gating */
1183 	vcn_v4_0_3_disable_clock_gating(vinst);
1184 
1185 	/* enable VCPU clock */
1186 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
1187 		 UVD_VCPU_CNTL__CLK_EN_MASK,
1188 		 ~UVD_VCPU_CNTL__CLK_EN_MASK);
1189 
1190 	/* disable master interrupt */
1191 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
1192 		 ~UVD_MASTINT_EN__VCPU_EN_MASK);
1193 
1194 	/* enable LMI MC and UMC channels */
1195 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
1196 		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1197 
1198 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
1199 	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
1200 	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
1201 	WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
1202 
1203 	/* setup regUVD_LMI_CTRL */
1204 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
1205 	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL,
1206 		     tmp | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
1207 		     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
1208 		     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
1209 		     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
1210 
1211 	/* setup regUVD_MPC_CNTL */
1212 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL);
1213 	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
1214 	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
1215 	WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL, tmp);
1216 
1217 	/* setup UVD_MPC_SET_MUXA0 */
1218 	WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXA0,
1219 		     ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
1220 		      (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
1221 		      (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
1222 		      (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
1223 
1224 	/* setup UVD_MPC_SET_MUXB0 */
1225 	WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXB0,
1226 		     ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
1227 		      (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
1228 		      (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
1229 		      (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
1230 
1231 	/* setup UVD_MPC_SET_MUX */
1232 	WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUX,
1233 		     ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
1234 		      (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
1235 		      (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
1236 
1237 	vcn_v4_0_3_mc_resume(vinst);
1238 
1239 	/* VCN global tiling registers */
1240 	WREG32_SOC15(VCN, vcn_inst, regUVD_GFX8_ADDR_CONFIG,
1241 		     adev->gfx.config.gb_addr_config);
1242 	WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
1243 		     adev->gfx.config.gb_addr_config);
1244 
1245 	/* unblock VCPU register access */
1246 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
1247 		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
1248 
1249 	/* release VCPU reset to boot */
1250 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
1251 		 ~UVD_VCPU_CNTL__BLK_RST_MASK);
1252 
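	/* Wait for the VCPU to come up (UVD_STATUS bit 1); if it does not,
	 * pulse BLK_RST and retry the boot, giving up after 10 attempts.
	 */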
1253 	for (j = 0; j < 10; ++j) {
1254 		uint32_t status;
1255 
1256 		for (k = 0; k < 100; ++k) {
1257 			status = RREG32_SOC15(VCN, vcn_inst,
1258 					      regUVD_STATUS);
1259 			if (status & 2)
1260 				break;
1261 			mdelay(10);
1262 		}
1263 		r = 0;
1264 		if (status & 2)
1265 			break;
1266 
1267 		DRM_DEV_ERROR(adev->dev,
1268 			      "VCN decode not responding, trying to reset the VCPU!!!\n");
1269 		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
1270 					  regUVD_VCPU_CNTL),
1271 			 UVD_VCPU_CNTL__BLK_RST_MASK,
1272 			 ~UVD_VCPU_CNTL__BLK_RST_MASK);
1273 		mdelay(10);
1274 		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
1275 					  regUVD_VCPU_CNTL),
1276 			 0, ~UVD_VCPU_CNTL__BLK_RST_MASK);
1277 
1278 		mdelay(10);
1279 		r = -1;
1280 	}
1281 
1282 	if (r) {
1283 		DRM_DEV_ERROR(adev->dev, "VCN decode not responding, giving up!!!\n");
1284 		return r;
1285 	}
1286 
1287 	/* enable master interrupt */
1288 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
1289 		 UVD_MASTINT_EN__VCPU_EN_MASK,
1290 		 ~UVD_MASTINT_EN__VCPU_EN_MASK);
1291 
1292 	/* clear the busy bit of VCN_STATUS */
1293 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
1294 		 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
1295 
1296 	ring = &adev->vcn.inst[i].ring_enc[0];
1297 	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
1298 
1299 	/* program the RB_BASE for ring buffer */
1300 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
1301 		     lower_32_bits(ring->gpu_addr));
1302 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
1303 		     upper_32_bits(ring->gpu_addr));
1304 
1305 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
1306 		     ring->ring_size / sizeof(uint32_t));
1307 
1308 	/* resetting ring, fw should not check RB ring */
1309 	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
1310 	tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
1311 	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
1312 
1313 	/* Initialize the ring buffer's read and write pointers */
1314 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
1315 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
1316 
1317 	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
1318 	tmp |= VCN_RB_ENABLE__RB_EN_MASK;
1319 	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
1320 
1321 	ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
1322 	fw_shared->sq.queue_mode &=
1323 		cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF));
1324 
1325 	return 0;
1326 }
1327 
1328 /**
1329  * vcn_v4_0_3_stop_dpg_mode - VCN stop with dpg mode
1330  *
1331  * @vinst: VCN instance
1332  *
1333  * Stop VCN block with dpg mode
1334  */
1335 static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
1336 {
1337 	struct amdgpu_device *adev = vinst->adev;
1338 	int inst_idx = vinst->inst;
1339 	uint32_t tmp;
1340 	int vcn_inst;
1341 
1342 	vcn_inst = GET_INST(VCN, inst_idx);
1343 
1344 	/* Wait for power status to be 1 */
1345 	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
1346 			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1347 
1348 	/* wait for read ptr to be equal to write ptr */
1349 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
1350 	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);
1351 
1352 	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
1353 			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1354 
1355 	/* disable dynamic power gating mode */
1356 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
1357 		 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
1358 	return 0;
1359 }
1360 
1361 /**
1362  * vcn_v4_0_3_stop - VCN stop
1363  *
1364  * @vinst: VCN instance
1365  *
1366  * Stop VCN block
1367  */
1368 static int vcn_v4_0_3_stop(struct amdgpu_vcn_inst *vinst)
1369 {
1370 	struct amdgpu_device *adev = vinst->adev;
1371 	int i = vinst->inst;
1372 	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
1373 	int r = 0, vcn_inst;
1374 	uint32_t tmp;
1375 
1376 	vcn_inst = GET_INST(VCN, i);
1377 
1378 	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
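	/* Ask the firmware to hold off DPG transitions while the block is stopped */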
1379 	fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
1380 
1381 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1382 		vcn_v4_0_3_stop_dpg_mode(vinst);
1383 		goto Done;
1384 	}
1385 
1386 	/* wait for vcn idle */
1387 	r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS,
1388 			       UVD_STATUS__IDLE, 0x7);
1389 	if (r)
1390 		goto Done;
1391 
1392 	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
1393 		UVD_LMI_STATUS__READ_CLEAN_MASK |
1394 		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
1395 		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
1396 	r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
1397 			       tmp);
1398 	if (r)
1399 		goto Done;
1400 
1401 	/* stall UMC channel */
1402 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
1403 	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
1404 	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
1405 	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
1406 		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
1407 	r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
1408 			       tmp);
1409 	if (r)
1410 		goto Done;
1411 
1412 	/* Block VCPU register access */
1413 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
1414 		 UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
1415 		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
1416 
1417 	/* put VCPU into reset */
1418 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
1419 		 UVD_VCPU_CNTL__BLK_RST_MASK,
1420 		 ~UVD_VCPU_CNTL__BLK_RST_MASK);
1421 
1422 	/* disable VCPU clock */
1423 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
1424 		 ~(UVD_VCPU_CNTL__CLK_EN_MASK));
1425 
1426 	/* reset LMI UMC/LMI/VCPU */
1427 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
1428 	tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
1429 	WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
1430 
1431 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
1432 	tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
1433 	WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
1434 
1435 	/* clear VCN status */
1436 	WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
1437 
1438 	/* apply HW clock gating */
1439 	vcn_v4_0_3_enable_clock_gating(vinst);
1440 
1441 Done:
1442 	return 0;
1443 }
1444 
1445 /**
1446  * vcn_v4_0_3_pause_dpg_mode - VCN pause with dpg mode
1447  *
1448  * @vinst: VCN instance
1449  * @new_state: pause state
1450  *
1451  * Pause dpg mode for VCN block
1452  */
1453 static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
1454 				     struct dpg_pause_state *new_state)
1455 {
1456 
1457 	return 0;
1458 }
1459 
1460 /**
1461  * vcn_v4_0_3_unified_ring_get_rptr - get unified read pointer
1462  *
1463  * @ring: amdgpu_ring pointer
1464  *
1465  * Returns the current hardware unified read pointer
1466  */
1467 static uint64_t vcn_v4_0_3_unified_ring_get_rptr(struct amdgpu_ring *ring)
1468 {
1469 	struct amdgpu_device *adev = ring->adev;
1470 
1471 	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
1472 		DRM_ERROR("wrong ring id is identified in %s", __func__);
1473 
1474 	return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR);
1475 }
1476 
1477 /**
1478  * vcn_v4_0_3_unified_ring_get_wptr - get unified write pointer
1479  *
1480  * @ring: amdgpu_ring pointer
1481  *
1482  * Returns the current hardware unified write pointer
1483  */
1484 static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring)
1485 {
1486 	struct amdgpu_device *adev = ring->adev;
1487 
1488 	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
1489 		DRM_ERROR("wrong ring id is identified in %s", __func__);
1490 
1491 	if (ring->use_doorbell)
1492 		return *ring->wptr_cpu_addr;
1493 	else
1494 		return RREG32_SOC15(VCN, GET_INST(VCN, ring->me),
1495 				    regUVD_RB_WPTR);
1496 }
1497 
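/* Emit a REG_WAIT packet: command word, byte-aligned register offset
 * (dword offset << 2), mask and expected value.
 */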
1498 void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1499 				       uint32_t val, uint32_t mask)
1500 {
1501 	/* Use normalized offsets when required */
1502 	if (vcn_v4_0_3_normalizn_reqd(ring->adev))
1503 		reg = NORMALIZE_VCN_REG_OFFSET(reg);
1504 
1505 	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
1506 	amdgpu_ring_write(ring, reg << 2);
1507 	amdgpu_ring_write(ring, mask);
1508 	amdgpu_ring_write(ring, val);
1509 }
1510 
1511 void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
1512 				   uint32_t val)
1513 {
1514 	/* Use normalized offsets when required */
1515 	if (vcn_v4_0_3_normalizn_reqd(ring->adev))
1516 		reg = NORMALIZE_VCN_REG_OFFSET(reg);
1517 
1518 	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
1519 	amdgpu_ring_write(ring,	reg << 2);
1520 	amdgpu_ring_write(ring, val);
1521 }
1522 
1523 void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1524 				       unsigned int vmid, uint64_t pd_addr)
1525 {
1526 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
1527 
1528 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1529 
1530 	/* wait for reg writes */
1531 	vcn_v4_0_3_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
1532 					vmid * hub->ctx_addr_distance,
1533 					lower_32_bits(pd_addr), 0xffffffff);
1534 }
1535 
1536 void vcn_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
1537 {
1538 	/* VCN engine access for HDP flush doesn't work when RRMT is enabled.
1539 	 * This is a workaround to avoid any HDP flush through VCN ring.
1540 	 */
1541 }
1542 
1543 /**
1544  * vcn_v4_0_3_unified_ring_set_wptr - set enc write pointer
1545  *
1546  * @ring: amdgpu_ring pointer
1547  *
1548  * Commits the enc write pointer to the hardware
1549  */
1550 static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring)
1551 {
1552 	struct amdgpu_device *adev = ring->adev;
1553 
1554 	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
1555 		DRM_ERROR("wrong ring id is identified in %s", __func__);
1556 
1557 	if (ring->use_doorbell) {
1558 		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1559 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1560 	} else {
1561 		WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR,
1562 			     lower_32_bits(ring->wptr));
1563 	}
1564 }
1565 
1566 static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
1567 	.type = AMDGPU_RING_TYPE_VCN_ENC,
1568 	.align_mask = 0x3f,
1569 	.nop = VCN_ENC_CMD_NO_OP,
1570 	.get_rptr = vcn_v4_0_3_unified_ring_get_rptr,
1571 	.get_wptr = vcn_v4_0_3_unified_ring_get_wptr,
1572 	.set_wptr = vcn_v4_0_3_unified_ring_set_wptr,
1573 	.emit_frame_size =
1574 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1575 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1576 		4 + /* vcn_v4_0_3_enc_ring_emit_vm_flush */
1577 		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
1578 		1, /* vcn_v2_0_enc_ring_insert_end */
1579 	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
1580 	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
1581 	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
1582 	.emit_vm_flush = vcn_v4_0_3_enc_ring_emit_vm_flush,
1583 	.emit_hdp_flush = vcn_v4_0_3_ring_emit_hdp_flush,
1584 	.test_ring = amdgpu_vcn_enc_ring_test_ring,
1585 	.test_ib = amdgpu_vcn_unified_ring_test_ib,
1586 	.insert_nop = amdgpu_ring_insert_nop,
1587 	.insert_end = vcn_v2_0_enc_ring_insert_end,
1588 	.pad_ib = amdgpu_ring_generic_pad_ib,
1589 	.begin_use = amdgpu_vcn_ring_begin_use,
1590 	.end_use = amdgpu_vcn_ring_end_use,
1591 	.emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
1592 	.emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
1593 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1594 };
1595 
1596 /**
1597  * vcn_v4_0_3_set_unified_ring_funcs - set unified ring functions
1598  *
1599  * @adev: amdgpu_device pointer
1600  *
1601  * Set unified ring functions
1602  */
1603 static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev)
1604 {
1605 	int i, vcn_inst;
1606 
1607 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1608 		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_3_unified_ring_vm_funcs;
1609 		adev->vcn.inst[i].ring_enc[0].me = i;
1610 		vcn_inst = GET_INST(VCN, i);
1611 		adev->vcn.inst[i].aid_id =
1612 			vcn_inst / adev->vcn.num_inst_per_aid;
1613 	}
1614 }
1615 
1616 /**
1617  * vcn_v4_0_3_is_idle - check whether the VCN block is idle
1618  *
1619  * @ip_block: Pointer to the amdgpu_ip_block structure
1620  *
1621  * Check whether the VCN block is idle
1622  */
1623 static bool vcn_v4_0_3_is_idle(struct amdgpu_ip_block *ip_block)
1624 {
1625 	struct amdgpu_device *adev = ip_block->adev;
1626 	int i, ret = 1;
1627 
1628 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1629 		ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) ==
1630 			UVD_STATUS__IDLE);
1631 	}
1632 
1633 	return ret;
1634 }
1635 
1636 /**
1637  * vcn_v4_0_3_wait_for_idle - wait for the VCN block to become idle
1638  *
1639  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
1640  *
1641  * Wait for the VCN block to become idle
1642  */
1643 static int vcn_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
1644 {
1645 	struct amdgpu_device *adev = ip_block->adev;
1646 	int i, ret = 0;
1647 
1648 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1649 		ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS,
1650 					 UVD_STATUS__IDLE, UVD_STATUS__IDLE);
1651 		if (ret)
1652 			return ret;
1653 	}
1654 
1655 	return ret;
1656 }
1657 
1658 /**
1659  * vcn_v4_0_3_set_clockgating_state - set VCN block clockgating state
1660  * @ip_block: amdgpu_ip_block pointer
1661  * @state: clock gating state
1662  *
1663  * Set VCN block clockgating state
1664  */
1665 static int vcn_v4_0_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
1666 					  enum amd_clockgating_state state)
1667 {
1668 	struct amdgpu_device *adev = ip_block->adev;
1669 	bool enable = state == AMD_CG_STATE_GATE;
1670 	int i;
1671 
1672 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1673 		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
1674 
1675 		if (enable) {
1676 			if (RREG32_SOC15(VCN, GET_INST(VCN, i),
1677 					 regUVD_STATUS) != UVD_STATUS__IDLE)
1678 				return -EBUSY;
1679 			vcn_v4_0_3_enable_clock_gating(vinst);
1680 		} else {
1681 			vcn_v4_0_3_disable_clock_gating(vinst);
1682 		}
1683 	}
1684 	return 0;
1685 }
1686 
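/**
 * vcn_v4_0_3_set_pg_state - set VCN instance powergating state
 *
 * @vinst: VCN instance pointer
 * @state: powergating state to set
 *
 * Start or stop the instance to match the requested powergating state
 * and cache the new state on success.  Under SRIOV the state is forced
 * to ungated and left to the MMSCH firmware.
 */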
1687 static int vcn_v4_0_3_set_pg_state(struct amdgpu_vcn_inst *vinst,
1688 				   enum amd_powergating_state state)
1689 {
1690 	struct amdgpu_device *adev = vinst->adev;
1691 	int ret = 0;
1692 
1693 	/* For SRIOV, the guest should not control VCN power-gating;
1694 	 * MMSCH FW controls power-gating and clock-gating, so the
1695 	 * guest should avoid touching CGC and PG.
1696 	 */
1697 	if (amdgpu_sriov_vf(adev)) {
1698 		vinst->cur_state = AMD_PG_STATE_UNGATE;
1699 		return 0;
1700 	}
1701 
1702 	if (state == vinst->cur_state)
1703 		return 0;
1704 
1705 	if (state == AMD_PG_STATE_GATE)
1706 		ret = vcn_v4_0_3_stop(vinst);
1707 	else
1708 		ret = vcn_v4_0_3_start(vinst);
1709 
1710 	if (!ret)
1711 		vinst->cur_state = state;
1712 
1713 	return ret;
1714 }
1715 
1716 /**
1717  * vcn_v4_0_3_set_interrupt_state - set VCN block interrupt state
1718  *
1719  * @adev: amdgpu_device pointer
1720  * @source: interrupt source
1721  * @type: interrupt type
1722  * @state: interrupt state
1723  *
1724  * Set VCN block interrupt state
1725  */
1726 static int vcn_v4_0_3_set_interrupt_state(struct amdgpu_device *adev,
1727 					struct amdgpu_irq_src *source,
1728 					unsigned int type,
1729 					enum amdgpu_interrupt_state state)
1730 {
1731 	return 0;
1732 }
1733 
1734 /**
1735  * vcn_v4_0_3_process_interrupt - process VCN block interrupt
1736  *
1737  * @adev: amdgpu_device pointer
1738  * @source: interrupt source
1739  * @entry: interrupt entry from clients and sources
1740  *
1741  * Process VCN block interrupt
1742  */
1743 static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev,
1744 				      struct amdgpu_irq_src *source,
1745 				      struct amdgpu_iv_entry *entry)
1746 {
1747 	uint32_t i, inst;
1748 
1749 	i = node_id_to_phys_map[entry->node_id];
1750 
1751 	DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n");
1752 
1753 	for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst)
1754 		if (adev->vcn.inst[inst].aid_id == i)
1755 			break;
1756 
1757 	if (inst >= adev->vcn.num_vcn_inst) {
1758 		dev_WARN_ONCE(adev->dev, 1,
1759 			      "Interrupt received for unknown VCN instance %d",
1760 			      entry->node_id);
1761 		return 0;
1762 	}
1763 
1764 	switch (entry->src_id) {
1765 	case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
1766 		amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]);
1767 		break;
1768 	default:
1769 		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
1770 			  entry->src_id, entry->src_data[0]);
1771 		break;
1772 	}
1773 
1774 	return 0;
1775 }
1776 
1777 static const struct amdgpu_irq_src_funcs vcn_v4_0_3_irq_funcs = {
1778 	.set = vcn_v4_0_3_set_interrupt_state,
1779 	.process = vcn_v4_0_3_process_interrupt,
1780 };
1781 
1782 /**
1783  * vcn_v4_0_3_set_irq_funcs - set VCN block interrupt irq functions
1784  *
1785  * @adev: amdgpu_device pointer
1786  *
1787  * Set VCN block interrupt irq functions
1788  */
1789 static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
1790 {
1791 	int i;
1792 
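	/* All instances share the first instance's IRQ source;
	 * register one interrupt type per VCN instance on it.
	 */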
1793 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1794 		adev->vcn.inst->irq.num_types++;
1795 	}
1796 	adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
1797 }
1798 
1799 static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
1800 {
1801 	struct amdgpu_device *adev = ip_block->adev;
1802 	int i, j;
1803 	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
1804 	uint32_t inst_off, is_powered;
1805 
1806 	if (!adev->vcn.ip_dump)
1807 		return;
1808 
1809 	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
1810 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1811 		if (adev->vcn.harvest_config & (1 << i)) {
1812 			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
1813 			continue;
1814 		}
1815 
1816 		inst_off = i * reg_count;
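		/* A power-status field value of 1 is treated as power gated */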
1817 		is_powered = (adev->vcn.ip_dump[inst_off] &
1818 				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
1819 
1820 		if (is_powered) {
1821 			drm_printf(p, "\nActive Instance:VCN%d\n", i);
1822 			for (j = 0; j < reg_count; j++)
1823 				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_3[j].reg_name,
1824 					   adev->vcn.ip_dump[inst_off + j]);
1825 		} else {
1826 			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
1827 		}
1828 	}
1829 }
1830 
1831 static void vcn_v4_0_3_dump_ip_state(struct amdgpu_ip_block *ip_block)
1832 {
1833 	struct amdgpu_device *adev = ip_block->adev;
1834 	int i, j;
1835 	bool is_powered;
1836 	uint32_t inst_off, inst_id;
1837 	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
1838 
1839 	if (!adev->vcn.ip_dump)
1840 		return;
1841 
1842 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1843 		if (adev->vcn.harvest_config & (1 << i))
1844 			continue;
1845 
1846 		inst_id = GET_INST(VCN, i);
1847 		inst_off = i * reg_count;
1848 		/* mmUVD_POWER_STATUS is always readable and is the first element of the array */
1849 		adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, inst_id, regUVD_POWER_STATUS);
1850 		is_powered = (adev->vcn.ip_dump[inst_off] &
1851 				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
1852 
1853 		if (is_powered)
1854 			for (j = 1; j < reg_count; j++)
1855 				adev->vcn.ip_dump[inst_off + j] =
1856 					RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_3[j],
1857 									   inst_id));
1858 	}
1859 }
1860 
1861 static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
1862 	.name = "vcn_v4_0_3",
1863 	.early_init = vcn_v4_0_3_early_init,
1864 	.sw_init = vcn_v4_0_3_sw_init,
1865 	.sw_fini = vcn_v4_0_3_sw_fini,
1866 	.hw_init = vcn_v4_0_3_hw_init,
1867 	.hw_fini = vcn_v4_0_3_hw_fini,
1868 	.suspend = vcn_v4_0_3_suspend,
1869 	.resume = vcn_v4_0_3_resume,
1870 	.is_idle = vcn_v4_0_3_is_idle,
1871 	.wait_for_idle = vcn_v4_0_3_wait_for_idle,
1872 	.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
1873 	.set_powergating_state = vcn_set_powergating_state,
1874 	.dump_ip_state = vcn_v4_0_3_dump_ip_state,
1875 	.print_ip_state = vcn_v4_0_3_print_ip_state,
1876 };
1877 
1878 const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
1879 	.type = AMD_IP_BLOCK_TYPE_VCN,
1880 	.major = 4,
1881 	.minor = 0,
1882 	.rev = 3,
1883 	.funcs = &vcn_v4_0_3_ip_funcs,
1884 };
1885 
1886 static const struct amdgpu_ras_err_status_reg_entry vcn_v4_0_3_ue_reg_list[] = {
1887 	{AMDGPU_RAS_REG_ENTRY(VCN, 0, regVCN_UE_ERR_STATUS_LO_VIDD, regVCN_UE_ERR_STATUS_HI_VIDD),
1888 	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "VIDD"},
1889 	{AMDGPU_RAS_REG_ENTRY(VCN, 0, regVCN_UE_ERR_STATUS_LO_VIDV, regVCN_UE_ERR_STATUS_HI_VIDV),
1890 	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "VIDV"},
1891 };
1892 
1893 static void vcn_v4_0_3_inst_query_ras_error_count(struct amdgpu_device *adev,
1894 						  uint32_t vcn_inst,
1895 						  void *ras_err_status)
1896 {
1897 	struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status;
1898 
1899 	/* VCN v4_0_3 only supports querying uncorrectable errors */
1900 	amdgpu_ras_inst_query_ras_error_count(adev,
1901 			vcn_v4_0_3_ue_reg_list,
1902 			ARRAY_SIZE(vcn_v4_0_3_ue_reg_list),
1903 			NULL, 0, GET_INST(VCN, vcn_inst),
1904 			AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
1905 			&err_data->ue_count);
1906 }
1907 
1908 static void vcn_v4_0_3_query_ras_error_count(struct amdgpu_device *adev,
1909 					     void *ras_err_status)
1910 {
1911 	uint32_t i;
1912 
1913 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
1914 		dev_warn(adev->dev, "VCN RAS is not supported\n");
1915 		return;
1916 	}
1917 
1918 	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
1919 		vcn_v4_0_3_inst_query_ras_error_count(adev, i, ras_err_status);
1920 }
1921 
1922 static void vcn_v4_0_3_inst_reset_ras_error_count(struct amdgpu_device *adev,
1923 						  uint32_t vcn_inst)
1924 {
1925 	amdgpu_ras_inst_reset_ras_error_count(adev,
1926 					vcn_v4_0_3_ue_reg_list,
1927 					ARRAY_SIZE(vcn_v4_0_3_ue_reg_list),
1928 					GET_INST(VCN, vcn_inst));
1929 }
1930 
1931 static void vcn_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev)
1932 {
1933 	uint32_t i;
1934 
1935 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
1936 		dev_warn(adev->dev, "VCN RAS is not supported\n");
1937 		return;
1938 	}
1939 
1940 	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
1941 		vcn_v4_0_3_inst_reset_ras_error_count(adev, i);
1942 }
1943 
1944 static const struct amdgpu_ras_block_hw_ops vcn_v4_0_3_ras_hw_ops = {
1945 	.query_ras_error_count = vcn_v4_0_3_query_ras_error_count,
1946 	.reset_ras_error_count = vcn_v4_0_3_reset_ras_error_count,
1947 };
1948 
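/*
 * Parse an ACA bank reported by the SMU: uncorrectable banks are logged
 * with a fixed count of one, correctable/deferred banks with the error
 * count taken from the MISC0 register.
 */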
1949 static int vcn_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
1950 				      enum aca_smu_type type, void *data)
1951 {
1952 	struct aca_bank_info info;
1953 	u64 misc0;
1954 	int ret;
1955 
1956 	ret = aca_bank_info_decode(bank, &info);
1957 	if (ret)
1958 		return ret;
1959 
1960 	misc0 = bank->regs[ACA_REG_IDX_MISC0];
1961 	switch (type) {
1962 	case ACA_SMU_TYPE_UE:
1963 		bank->aca_err_type = ACA_ERROR_TYPE_UE;
1964 		ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
1965 						     1ULL);
1966 		break;
1967 	case ACA_SMU_TYPE_CE:
1968 		bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank);
1969 		ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
1970 						     ACA_REG__MISC0__ERRCNT(misc0));
1971 		break;
1972 	default:
1973 		return -EINVAL;
1974 	}
1975 
1976 	return ret;
1977 }
1978 
1979 /* refer to the SMU driver interface header file for these error codes */
1980 static int vcn_v4_0_3_err_codes[] = {
1981 	14, 15, /* VCN */
1982 };
1983 
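/*
 * A bank is relevant to VCN only when its IPID instance-ID low field
 * (with bit 0 masked off) matches the AID SMU MCA instance and its error
 * code is one of the VCN error codes listed above.
 */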
1984 static bool vcn_v4_0_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
1985 					 enum aca_smu_type type, void *data)
1986 {
1987 	u32 instlo;
1988 
1989 	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
1990 	instlo &= GENMASK(31, 1);
1991 
1992 	if (instlo != mmSMNAID_AID0_MCA_SMU)
1993 		return false;
1994 
1995 	if (aca_bank_check_error_codes(handle->adev, bank,
1996 				       vcn_v4_0_3_err_codes,
1997 				       ARRAY_SIZE(vcn_v4_0_3_err_codes)))
1998 		return false;
1999 
2000 	return true;
2001 }
2002 
2003 static const struct aca_bank_ops vcn_v4_0_3_aca_bank_ops = {
2004 	.aca_bank_parser = vcn_v4_0_3_aca_bank_parser,
2005 	.aca_bank_is_valid = vcn_v4_0_3_aca_bank_is_valid,
2006 };
2007 
2008 static const struct aca_info vcn_v4_0_3_aca_info = {
2009 	.hwip = ACA_HWIP_TYPE_SMU,
2010 	.mask = ACA_ERROR_UE_MASK,
2011 	.bank_ops = &vcn_v4_0_3_aca_bank_ops,
2012 };
2013 
2014 static int vcn_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
2015 {
2016 	int r;
2017 
2018 	r = amdgpu_ras_block_late_init(adev, ras_block);
2019 	if (r)
2020 		return r;
2021 
2022 	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN,
2023 				&vcn_v4_0_3_aca_info, NULL);
2024 	if (r)
2025 		goto late_fini;
2026 
2027 	return 0;
2028 
2029 late_fini:
2030 	amdgpu_ras_block_late_fini(adev, ras_block);
2031 
2032 	return r;
2033 }
2034 
2035 static struct amdgpu_vcn_ras vcn_v4_0_3_ras = {
2036 	.ras_block = {
2037 		.hw_ops = &vcn_v4_0_3_ras_hw_ops,
2038 		.ras_late_init = vcn_v4_0_3_ras_late_init,
2039 	},
2040 };
2041 
2042 static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev)
2043 {
2044 	adev->vcn.ras = &vcn_v4_0_3_ras;
2045 }
2046 
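/*
 * Enable RAS error reporting for a VCN instance: route VCPU/VCODEC RAS
 * errors to the interrupt handler through VCN_RAS_CNTL and the VCPU and
 * system interrupt enable registers, using DPG (indirect) writes when
 * requested.
 */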
2047 static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev,
2048 				  int inst_idx, bool indirect)
2049 {
2050 	uint32_t tmp;
2051 
2052 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
2053 		return;
2054 
2055 	tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK |
2056 	      VCN_RAS_CNTL__VCPU_VCODEC_IH_EN_MASK |
2057 	      VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN_MASK |
2058 	      VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN_MASK;
2059 	WREG32_SOC15_DPG_MODE(inst_idx,
2060 			      SOC15_DPG_MODE_OFFSET(VCN, 0, regVCN_RAS_CNTL),
2061 			      tmp, 0, indirect);
2062 
2063 	tmp = UVD_VCPU_INT_EN2__RASCNTL_VCPU_VCODEC_EN_MASK;
2064 	WREG32_SOC15_DPG_MODE(inst_idx,
2065 			      SOC15_DPG_MODE_OFFSET(VCN, 0, regUVD_VCPU_INT_EN2),
2066 			      tmp, 0, indirect);
2067 
2068 	tmp = UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
2069 	WREG32_SOC15_DPG_MODE(inst_idx,
2070 			      SOC15_DPG_MODE_OFFSET(VCN, 0, regUVD_SYS_INT_EN),
2071 			      tmp, 0, indirect);
2072 }
2073