/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
#include "vcn_v4_0_3.h"
#include "mmsch_v4_0_3.h"

#include "vcn/vcn_4_0_3_offset.h"
#include "vcn/vcn_4_0_3_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"

#define mmUVD_DPG_LMA_CTL		regUVD_DPG_LMA_CTL
#define mmUVD_DPG_LMA_CTL_BASE_IDX	regUVD_DPG_LMA_CTL_BASE_IDX
#define mmUVD_DPG_LMA_DATA		regUVD_DPG_LMA_DATA
#define mmUVD_DPG_LMA_DATA_BASE_IDX	regUVD_DPG_LMA_DATA_BASE_IDX

#define VCN_VID_SOC_ADDRESS_2_0		0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0	0x48300
#define VCN1_AON_SOC_ADDRESS_3_0	0x48000

static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = {
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_CONFIG),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
};

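/*
 * When RRMT (register routing) is disabled, register offsets emitted
 * through the unified ring must not be absolute SOC15 offsets; masking
 * with 0x1FFFF keeps only the low 17 bits, which (judging by its use in
 * the emit_wreg/emit_reg_wait helpers below) yields the VCN-relative
 * offset the firmware expects. vcn_v4_0_3_normalizn_reqd() reports
 * whether this normalization is needed.
 */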
#define NORMALIZE_VCN_REG_OFFSET(offset) \
		(offset & 0x1FFFF)

static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_3_set_pg_state(struct amdgpu_vcn_inst *vinst,
				   enum amd_powergating_state state);
static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				     struct dpg_pause_state *new_state);
static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev,
				  int inst_idx, bool indirect);

static inline bool vcn_v4_0_3_normalizn_reqd(struct amdgpu_device *adev)
{
	return (adev->vcn.caps & AMDGPU_VCN_CAPS(RRMT_ENABLED)) == 0;
}

/**
 * vcn_v4_0_3_early_init - set function pointers
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 */
static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
		/* re-use enc ring as unified ring */
		adev->vcn.inst[i].num_enc_rings = 1;

	vcn_v4_0_3_set_unified_ring_funcs(adev);
	vcn_v4_0_3_set_irq_funcs(adev);
	vcn_v4_0_3_set_ras_funcs(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		adev->vcn.inst[i].set_pg_state = vcn_v4_0_3_set_pg_state;

		r = amdgpu_vcn_early_init(adev, i);
		if (r)
			return r;
	}

	return 0;
}

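/*
 * Worked example for the version check below (assuming the SOS
 * fw_version packs the program ID in bits 15:8): fw_version 0x00360160
 * gives pgm = (0x00360160 >> 8) & 0xFF = 0x01, and since
 * 0x00360160 >= 0x0036015f the per-queue reset support is allowed.
 */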
static bool vcn_v4_0_3_is_psp_fw_reset_supported(struct amdgpu_device *adev)
{
	uint32_t fw_ver = adev->psp.sos.fw_version;
	uint32_t pgm = (fw_ver >> 8) & 0xFF;

	/*
	 * FWDEV-159155: PSP SOS FW must be >= 0x0036015f for program 0x01
	 * before enabling VCN per-queue reset.
	 */
	if (pgm == 1)
		return fw_ver >= 0x0036015f;

	return true;
}

static int vcn_v4_0_3_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->vcn.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);

	if (amdgpu_dpm_reset_vcn_is_supported(adev) &&
	    vcn_v4_0_3_is_psp_fw_reset_supported(adev) &&
	    !amdgpu_sriov_vf(adev))
		adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;

	return 0;
}

static int vcn_v4_0_3_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
	struct amdgpu_vcn4_fw_shared *fw_shared;

	fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
	fw_shared->sq.is_enabled = 1;

	if (amdgpu_vcnfw_log)
		amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);

	return 0;
}

/**
 * vcn_v4_0_3_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, r, vcn_inst;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
	if (r)
		return r;

	/* VCN POISON TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst->ras_poison_irq);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {

		r = amdgpu_vcn_sw_init(adev, i);
		if (r)
			return r;

		amdgpu_vcn_setup_ucode(adev, i);

		r = amdgpu_vcn_resume(adev, i);
		if (r)
			return r;

		vcn_inst = GET_INST(VCN, i);

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->use_doorbell = true;

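		/*
		 * Doorbell stride per VCN instance, as implied by the math
		 * below: 9 doorbells per instance on bare metal, 32 per
		 * instance under SR-IOV, where the host reserves a larger
		 * per-VF doorbell range.
		 */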
		if (!amdgpu_sriov_vf(adev))
			ring->doorbell_index =
				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				9 * vcn_inst;
		else
			ring->doorbell_index =
				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				32 * vcn_inst;

		ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
		sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);

		/* There are no per-instance irq source IDs on 4.0.3, the IH
		 * packets use a separate field to differentiate instances.
		 */
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0,
				     AMDGPU_RING_PRIO_DEFAULT,
				     &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		vcn_v4_0_3_fw_shared_init(adev, i);

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
			adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
		r = amdgpu_vcn_ras_sw_init(adev);
		if (r) {
			dev_err(adev->dev, "Failed to initialize vcn ras block!\n");
			return r;
		}
	}

	r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0_3, ARRAY_SIZE(vcn_reg_list_4_0_3));
	if (r)
		return r;

	return amdgpu_vcn_sysfs_reset_mask_init(adev);
}

/**
 * vcn_v4_0_3_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r, idx;

	if (drm_dev_enter(&adev->ddev, &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			struct amdgpu_vcn4_fw_shared *fw_shared;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = cpu_to_le32(false);
		}
		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(adev, i);
		if (r)
			return r;
	}

	amdgpu_vcn_sysfs_reset_mask_fini(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		amdgpu_vcn_sw_fini(adev, i);

	return 0;
}

static int vcn_v4_0_3_hw_init_inst(struct amdgpu_vcn_inst *vinst)
{
	int vcn_inst;
	struct amdgpu_device *adev = vinst->adev;
	struct amdgpu_ring *ring;
	int inst_idx = vinst->inst;

	vcn_inst = GET_INST(VCN, inst_idx);
	ring = &adev->vcn.inst[inst_idx].ring_enc[0];
	if (ring->use_doorbell) {
		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
			(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * vcn_inst,
			adev->vcn.inst[inst_idx].aid_id);

		WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
			ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
			VCN_RB1_DB_CTRL__EN_MASK);

		/* Read DB_CTRL to flush the write DB_CTRL command. */
		RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
	}

	return 0;
}

/**
 * vcn_v4_0_3_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	struct amdgpu_vcn_inst *vinst;
	int i, r;

	if (amdgpu_sriov_vf(adev)) {
		r = vcn_v4_0_3_start_sriov(adev);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			ring = &adev->vcn.inst[i].ring_enc[0];
			ring->wptr = 0;
			ring->wptr_old = 0;
			vcn_v4_0_3_unified_ring_set_wptr(ring);
			ring->sched.ready = true;
		}
	} else {
		/* This flag is not set for VF; RRMT is assumed to always be disabled there */
		if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) &
		    0x100)
			adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);

		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			struct amdgpu_vcn4_fw_shared *fw_shared;

			ring = &adev->vcn.inst[i].ring_enc[0];
			vinst = &adev->vcn.inst[i];
			vcn_v4_0_3_hw_init_inst(vinst);

			/* Re-init fw_shared when RAS fatal error occurred */
			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			if (!fw_shared->sq.is_enabled)
				vcn_v4_0_3_fw_shared_init(adev, i);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				return r;
		}
	}

	return r;
}

/**
 * vcn_v4_0_3_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		cancel_delayed_work_sync(&vinst->idle_work);

		if (vinst->cur_state != AMD_PG_STATE_GATE)
			vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
	}

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN) && !amdgpu_sriov_vf(adev))
		amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);

	return 0;
}

/**
 * vcn_v4_0_3_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v4_0_3_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	r = vcn_v4_0_3_hw_fini(ip_block);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(adev, i);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v4_0_3_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v4_0_3_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_resume(ip_block->adev, i);
		if (r)
			return r;
	}

	r = vcn_v4_0_3_hw_init(ip_block);

	return r;
}

/**
 * vcn_v4_0_3_mc_resume - memory controller programming
 *
 * @vinst: VCN instance
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v4_0_3_mc_resume(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t offset, size, vcn_inst;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	vcn_inst = GET_INST(VCN, inst_idx);
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(
			VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx]
				 .tmr_mc_addr_lo));
		WREG32_SOC15(
			VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx]
				 .tmr_mc_addr_hi));
		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
		WREG32_SOC15(VCN, vcn_inst,
			     regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1,
		     AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
				   AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
				   AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2,
		     AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(
		VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
	WREG32_SOC15(
		VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(
		VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
}
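
/*
 * VCPU address-space layout programmed by vcn_v4_0_3_mc_resume() above
 * (regions live in the instance's firmware BO unless the image comes
 * from the PSP TMR):
 *
 *   cache window 0: firmware image  (page-aligned ucode size)
 *   cache window 1: stack           at offset, AMDGPU_VCN_STACK_SIZE
 *   cache window 2: context         after the stack, AMDGPU_VCN_CONTEXT_SIZE
 *   non-cache window 0: fw_shared   driver/firmware shared structure
 */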

/**
 * vcn_v4_0_3_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @vinst: VCN instance
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
					  bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
					inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
					inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
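
/*
 * Note on the DPG programming above: with indirect set,
 * WREG32_SOC15_DPG_MODE() does not touch the register at all; each
 * (offset, value) pair is appended to the instance's dpg_sram buffer
 * and only takes effect once the whole stream is handed to PSP via
 * amdgpu_vcn_psp_update_sram() in vcn_v4_0_3_start_dpg_mode().
 */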

/**
 * vcn_v4_0_3_disable_clock_gating - disable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Disable clock gating for VCN block
 */
static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t data;
	int vcn_inst;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	vcn_inst = GET_INST(VCN, inst_idx);

	/* VCN disable CGC */
	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
	data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE, data);
	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v4_0_3_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
 *
 * @vinst: VCN instance
 * @sram_sel: sram select
 * @indirect: indirectly write sram
 *
 * Disable clock gating for VCN block with dpg mode
 */
static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
						     uint8_t sram_sel,
						     uint8_t indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t reg_data = 0;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* enable sw clock gating control */
	reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v4_0_3_enable_clock_gating - enable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Enable clock gating for VCN block
 */
static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t data;
	int vcn_inst;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	vcn_inst = GET_INST(VCN, inst_idx);

	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
	data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v4_0_3_start_dpg_mode - VCN start with dpg mode
 *
 * @vinst: VCN instance
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
				     bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	struct amdgpu_vcn4_fw_shared *fw_shared =
						adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE};
	struct amdgpu_ring *ring;
	int vcn_inst, ret;
	uint32_t tmp;

	vcn_inst = GET_INST(VCN, inst_idx);
	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1,
		 ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp);

	if (indirect) {
		DRM_DEV_DEBUG(adev->dev, "VCN %d start: on AID %d",
			inst_idx, adev->vcn.inst[inst_idx].aid_id);
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
				(uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
		/* Use dummy register 0xDEADBEEF to pass the AID selection to PSP FW */
		WREG32_SOC15_DPG_MODE(inst_idx, 0xDEADBEEF,
			adev->vcn.inst[inst_idx].aid_id, 0, true);
	}

	/* disable clock gating */
	vcn_v4_0_3_disable_clock_gating_dpg_mode(vinst, 0, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect);

	/* setup regUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_SET_MUXB0),
		 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v4_0_3_mc_resume_dpg_mode(vinst, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable LMI MC and UMC channels */
	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect);

	vcn_v4_0_3_enable_ras(adev, inst_idx, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect) {
		ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
		if (ret) {
			dev_err(adev->dev, "vcn sram load failed %d\n", ret);
			return ret;
		}
	}

	ring = &adev->vcn.inst[inst_idx].ring_enc[0];

	/* Pause dpg */
	vcn_v4_0_3_pause_dpg_mode(vinst, &state);

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
		     upper_32_bits(ring->gpu_addr));

	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
		     ring->ring_size / sizeof(uint32_t));

	/* resetting ring, fw should not check RB ring */
	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
	ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB_EN_MASK;
	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	/* resetting done, fw can check RB ring */
	fw_shared->sq.queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);

	return 0;
}
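
/*
 * The UVD_RB_* ring-buffer registers at the end of
 * vcn_v4_0_3_start_dpg_mode() are written with plain MMIO rather than
 * the DPG SRAM stream: by that point the indirect image has already
 * been committed through PSP and DPG is held in the PAUSE state, so
 * direct programming is expected to be safe.
 */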

static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
{
	int i, vcn_inst;
	struct amdgpu_ring *ring_enc;
	uint64_t cache_addr;
	uint64_t rb_enc_addr;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t offset, cache_size;
	uint32_t tmp, timeout;

	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw;
	uint32_t init_status;
	uint32_t enabled_vcn;

	struct mmsch_v4_0_cmd_direct_write
		direct_wt = { {0} };
	struct mmsch_v4_0_cmd_direct_read_modify_write
		direct_rd_mod_wt = { {0} };
	struct mmsch_v4_0_cmd_end end = { {0} };
	struct mmsch_v4_0_3_init_header header;

	struct amdgpu_vcn4_fw_shared *fw_shared;
	struct amdgpu_fw_shared_rb_setup *rb_setup;

	direct_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		vcn_inst = GET_INST(VCN, i);

		vcn_v4_0_3_fw_shared_init(adev, vcn_inst);

		memset(&header, 0, sizeof(struct mmsch_v4_0_3_init_header));
		header.version = MMSCH_VERSION;
		header.total_size = sizeof(struct mmsch_v4_0_3_init_header) >> 2;

		table_loc = (uint32_t *)table->cpu_addr;
		table_loc += header.total_size;

		table_size = 0;

		MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);

			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);

			offset = 0;
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
				regUVD_VCPU_CACHE_OFFSET0), 0);
		} else {
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = cache_size;
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
				regUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_VCPU_CACHE_SIZE0),
			cache_size);

		cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset;
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(cache_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(cache_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_VCPU_CACHE_OFFSET1), 0);
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE);

		cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE;

		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(cache_addr));

		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(cache_addr));

		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_VCPU_CACHE_OFFSET2), 0);

		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE);

		fw_shared = adev->vcn.inst[vcn_inst].fw_shared.cpu_addr;
		rb_setup = &fw_shared->rb_setup;

		ring_enc = &adev->vcn.inst[vcn_inst].ring_enc[0];
		ring_enc->wptr = 0;
		rb_enc_addr = ring_enc->gpu_addr;

		rb_setup->is_rb_enabled_flags |= RB_ENABLED;
		rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
		rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
		rb_setup->rb_size = ring_enc->ring_size / 4;
		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);

		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
			regUVD_VCPU_NONCACHE_SIZE0),
			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
		MMSCH_V4_0_INSERT_END();

		header.vcn0.init_status = 0;
		header.vcn0.table_offset = header.total_size;
		header.vcn0.table_size = table_size;
		header.total_size += table_size;

		/* Send init table to mmsch */
		size = sizeof(struct mmsch_v4_0_3_init_header);
		table_loc = (uint32_t *)table->cpu_addr;
		memcpy((void *)table_loc, &header, size);

		ctx_addr = table->gpu_addr;
		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));

		tmp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID);
		tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
		tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID, tmp);

		size = header.total_size;
		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_SIZE, size);

		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP, 0);

		param = 0x00000001;
		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_HOST, param);
		tmp = 0;
		timeout = 1000;
		resp = 0;
		expected = MMSCH_VF_MAILBOX_RESP__OK;
		while (resp != expected) {
			resp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP);
			if (resp != 0)
				break;

			udelay(10);
			tmp = tmp + 10;
			if (tmp >= timeout) {
				DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
					" waiting for regMMSCH_VF_MAILBOX_RESP "\
					"(expected=0x%08x, readback=0x%08x)\n",
					tmp, expected, resp);
				return -EBUSY;
			}
		}

		enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0;
		init_status = ((struct mmsch_v4_0_3_init_header *)(table_loc))->vcn0.init_status;
		if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
					&& init_status != MMSCH_VF_ENGINE_STATUS__PASS) {
			DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\
				"status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status);
		}
	}

	return 0;
}
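
/*
 * SR-IOV bring-up above follows the MMSCH handshake: the host builds a
 * per-instance init table of encoded register writes, publishes its GPU
 * address through MMSCH_VF_CTX_ADDR_LO/HI, rings the mailbox via
 * MMSCH_VF_MAILBOX_HOST, then polls MMSCH_VF_MAILBOX_RESP in 10 us
 * steps for up to 1000 us before giving up with -EBUSY.
 */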

/**
 * vcn_v4_0_3_start - VCN start
 *
 * @vinst: VCN instance
 *
 * Start VCN block
 */
static int vcn_v4_0_3_start(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	struct amdgpu_vcn4_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	int j, k, r, vcn_inst;
	uint32_t tmp;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return vcn_v4_0_3_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);

	vcn_inst = GET_INST(VCN, i);
	/* set VCN status busy */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) |
		UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);

	/* SW clock gating */
	vcn_v4_0_3_disable_clock_gating(vinst);

	/* enable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
		 UVD_VCPU_CNTL__CLK_EN_MASK,
		 ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* disable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* enable LMI MC and UMC channels */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

	/* setup regUVD_LMI_CTRL */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL,
		     tmp | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

	/* setup regUVD_MPC_CNTL */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL, tmp);

	/* setup UVD_MPC_SET_MUXA0 */
	WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXA0,
		     ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		      (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		      (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	/* setup UVD_MPC_SET_MUXB0 */
	WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXB0,
		     ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		      (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		      (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	/* setup UVD_MPC_SET_MUX */
	WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUX,
		     ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		      (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v4_0_3_mc_resume(vinst);

	/* VCN global tiling registers */
	WREG32_SOC15(VCN, vcn_inst, regUVD_GFX8_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);

	/* unblock VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	/* release VCPU reset to boot */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
		 ~UVD_VCPU_CNTL__BLK_RST_MASK);

	for (j = 0; j < 10; ++j) {
		uint32_t status;

		for (k = 0; k < 100; ++k) {
			status = RREG32_SOC15(VCN, vcn_inst,
					      regUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_DEV_ERROR(adev->dev,
			      "VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
					  regUVD_VCPU_CNTL),
			 UVD_VCPU_CNTL__BLK_RST_MASK,
			 ~UVD_VCPU_CNTL__BLK_RST_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
					  regUVD_VCPU_CNTL),
			 0, ~UVD_VCPU_CNTL__BLK_RST_MASK);

		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_DEV_ERROR(adev->dev, "VCN decode not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
		 UVD_MASTINT_EN__VCPU_EN_MASK,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* clear the busy bit of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
		 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	ring = &adev->vcn.inst[i].ring_enc[0];
	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
		     upper_32_bits(ring->gpu_addr));

	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
		     ring->ring_size / sizeof(uint32_t));

	/* resetting ring, fw should not check RB ring */
	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);

	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB_EN_MASK;
	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);

	ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
	fw_shared->sq.queue_mode &=
		cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF));

	return 0;
}

/**
 * vcn_v4_0_3_stop_dpg_mode - VCN stop with dpg mode
 *
 * @vinst: VCN instance
 *
 * Stop VCN block with dpg mode
 */
static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t tmp;
	int vcn_inst;
	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};

	vcn_inst = GET_INST(VCN, inst_idx);

	/* Unpause dpg */
	vcn_v4_0_3_pause_dpg_mode(vinst, &state);

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
		 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);

	return 0;
}
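
/*
 * The RPTR/WPTR wait in vcn_v4_0_3_stop_dpg_mode() above drains the
 * unified ring before dynamic power gating is switched off; stopping
 * with commands still in flight could otherwise lose them.
 */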

/**
 * vcn_v4_0_3_stop - VCN stop
 *
 * @vinst: VCN instance
 *
 * Stop VCN block
 */
static int vcn_v4_0_3_stop(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	struct amdgpu_vcn4_fw_shared *fw_shared;
	int r = 0, vcn_inst;
	uint32_t tmp;

	vcn_inst = GET_INST(VCN, i);

	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
	fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		vcn_v4_0_3_stop_dpg_mode(vinst);
		goto Done;
	}

	/* wait for vcn idle */
	r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS,
			       UVD_STATUS__IDLE, 0x7);
	if (r)
		goto Done;

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
			       tmp);
	if (r)
		goto Done;

	/* stall UMC channel */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
			       tmp);
	if (r)
		goto Done;

	/* block VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
		 UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	/* reset VCPU */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
		 UVD_VCPU_CNTL__BLK_RST_MASK,
		 ~UVD_VCPU_CNTL__BLK_RST_MASK);

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
		 ~(UVD_VCPU_CNTL__CLK_EN_MASK));

	/* reset LMI UMC/LMI/VCPU */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
	tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
	tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

	/* clear VCN status */
	WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);

	/* apply HW clock gating */
	vcn_v4_0_3_enable_clock_gating(vinst);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);

Done:
	return 0;
}

/**
 * vcn_v4_0_3_pause_dpg_mode - VCN pause with dpg mode
 *
 * @vinst: VCN instance
 * @new_state: pause state
 *
 * Pause dpg mode for VCN block
 */
static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				     struct dpg_pause_state *new_state)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}
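
/*
 * Pause protocol, as implemented above: a PAUSE request is only issued
 * once UVD_POWER_STATUS reads back as powered on, and the driver then
 * spins until the firmware acknowledges through NJ_PAUSE_DPG_ACK;
 * UNPAUSE requires no acknowledgement.
 */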
1550 
1551 /**
1552  * vcn_v4_0_3_unified_ring_get_rptr - get unified read pointer
1553  *
1554  * @ring: amdgpu_ring pointer
1555  *
1556  * Returns the current hardware unified read pointer
1557  */
1558 static uint64_t vcn_v4_0_3_unified_ring_get_rptr(struct amdgpu_ring *ring)
1559 {
1560 	struct amdgpu_device *adev = ring->adev;
1561 
1562 	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
1563 		DRM_ERROR("wrong ring id in %s\n", __func__);
1564 
1565 	return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR);
1566 }
1567 
1568 /**
1569  * vcn_v4_0_3_unified_ring_get_wptr - get unified write pointer
1570  *
1571  * @ring: amdgpu_ring pointer
1572  *
1573  * Returns the current hardware unified write pointer
1574  */
1575 static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring)
1576 {
1577 	struct amdgpu_device *adev = ring->adev;
1578 
1579 	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
1580 		DRM_ERROR("wrong ring id in %s\n", __func__);
1581 
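	/* With doorbells enabled the CPU-side wptr shadow is the source of
	 * truth; otherwise read the write pointer back from UVD_RB_WPTR.
	 */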
1582 	if (ring->use_doorbell)
1583 		return *ring->wptr_cpu_addr;
1584 	else
1585 		return RREG32_SOC15(VCN, GET_INST(VCN, ring->me),
1586 				    regUVD_RB_WPTR);
1587 }
1588 
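/*
 * When register remapping (RRMT) is active, ring packets are expected to
 * carry offsets normalized through NORMALIZE_VCN_REG_OFFSET (presumably so
 * the access can be re-routed to the right physical instance).  The dword
 * offset is then shifted left by two to form the byte offset carried in
 * the packet.
 */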
1589 void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1590 				       uint32_t val, uint32_t mask)
1591 {
1592 	/* Use normalized offsets when required */
1593 	if (vcn_v4_0_3_normalizn_reqd(ring->adev))
1594 		reg = NORMALIZE_VCN_REG_OFFSET(reg);
1595 
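	/* REG_WAIT packet: command, register byte offset, mask, reference value */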
1596 	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
1597 	amdgpu_ring_write(ring, reg << 2);
1598 	amdgpu_ring_write(ring, mask);
1599 	amdgpu_ring_write(ring, val);
1600 }
1601 
1602 void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
1603 				   uint32_t val)
1604 {
1605 	/* Use normalized offsets when required */
1606 	if (vcn_v4_0_3_normalizn_reqd(ring->adev))
1607 		reg = NORMALIZE_VCN_REG_OFFSET(reg);
1608 
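	/* REG_WRITE packet: command, register byte offset, value */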
1609 	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
1610 	amdgpu_ring_write(ring, reg << 2);
1611 	amdgpu_ring_write(ring, val);
1612 }
1613 
1614 void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1615 				       unsigned int vmid, uint64_t pd_addr)
1616 {
1617 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
1618 
1619 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1620 
1621 	/* wait for reg writes */
1622 	vcn_v4_0_3_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
1623 					vmid * hub->ctx_addr_distance,
1624 					lower_32_bits(pd_addr), 0xffffffff);
1625 }
1626 
1627 void vcn_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
1628 {
1629 	/* VCN engine access for HDP flush doesn't work when RRMT is enabled.
1630 	 * As a workaround, avoid issuing any HDP flush through the VCN ring.
1631 	 */
1632 }
1633 
1634 /**
1635  * vcn_v4_0_3_unified_ring_set_wptr - set enc write pointer
1636  *
1637  * @ring: amdgpu_ring pointer
1638  *
1639  * Commits the enc write pointer to the hardware
1640  */
1641 static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring)
1642 {
1643 	struct amdgpu_device *adev = ring->adev;
1644 
1645 	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
1646 		DRM_ERROR("wrong ring id in %s\n", __func__);
1647 
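	/* Update the CPU shadow before ringing the doorbell so the hardware
	 * never samples a stale write pointer.
	 */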
1648 	if (ring->use_doorbell) {
1649 		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1650 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1651 	} else {
1652 		WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR,
1653 			     lower_32_bits(ring->wptr));
1654 	}
1655 }
1656 
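/*
 * On this IP the JPEG core shares a reset domain with VCN, so a VCN engine
 * reset takes the JPEG rings down too.  The pre helper stops the JPEG
 * schedulers and drains emitted fences before the reset; the post helper
 * forces completion of whatever is left, re-arms the doorbells and restarts
 * the schedulers afterwards.
 */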
1657 static int vcn_v4_0_3_reset_jpeg_pre_helper(struct amdgpu_device *adev, int inst)
1658 {
1659 	struct amdgpu_ring *ring;
1660 	uint32_t wait_seq = 0;
1661 	int i;
1662 
1663 	for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) {
1664 		ring = &adev->jpeg.inst[inst].ring_dec[i];
1665 
1666 		drm_sched_wqueue_stop(&ring->sched);
1667 		/* Get the last emitted fence sequence */
1668 		wait_seq = ring->fence_drv.sync_seq;
1669 		if (!wait_seq)
1670 			continue;
1671 
1672 		/* If jobs are still pending after the timeout,
1673 		 * the post helper below forces their completion.
1674 		 */
1675 		amdgpu_fence_wait_polling(ring, wait_seq, adev->video_timeout);
1676 	}
1677 
1678 	return 0;
1679 }
1680 
1681 static int vcn_v4_0_3_reset_jpeg_post_helper(struct amdgpu_device *adev, int inst)
1682 {
1683 	struct amdgpu_ring *ring;
1684 	int i, r = 0;
1685 
1686 	for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) {
1687 		ring = &adev->jpeg.inst[inst].ring_dec[i];
1688 		/* Force completion of any remaining jobs */
1689 		amdgpu_fence_driver_force_completion(ring);
1690 
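		/* Re-enable JPEG doorbell routing, which the engine reset is
		 * assumed to have cleared, before testing the ring.
		 */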
1691 		if (ring->use_doorbell)
1692 			WREG32_SOC15_OFFSET(
1693 				VCN, GET_INST(VCN, inst),
1694 				regVCN_JPEG_DB_CTRL,
1695 				(ring->pipe ? (ring->pipe - 0x15) : 0),
1696 				ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
1697 				VCN_JPEG_DB_CTRL__EN_MASK);
1698 
1699 		r = amdgpu_ring_test_helper(ring);
1700 		if (r)
1701 			return r;
1702 
1703 		drm_sched_wqueue_start(&ring->sched);
1704 
1705 		DRM_DEV_DEBUG(adev->dev, "JPEG ring %d (inst %d) restored and sched restarted\n",
1706 		      i, inst);
1707 	}
1708 	return 0;
1709 }
1710 
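/*
 * The reset sequence holds the per-instance engine_reset_mutex throughout
 * and temporarily ungates JPEG so the post-reset JPEG ring tests can run.
 */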
1711 static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring,
1712 				 unsigned int vmid,
1713 				 struct amdgpu_fence *timedout_fence)
1714 {
1715 	int r = 0;
1716 	int vcn_inst;
1717 	struct amdgpu_device *adev = ring->adev;
1718 	struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
1719 	bool pg_state = false;
1720 
1721 	/* take the vcn reset mutex here because resetting VCN will reset jpeg as well */
1722 	mutex_lock(&vinst->engine_reset_mutex);
1723 	mutex_lock(&adev->jpeg.jpeg_pg_lock);
1724 	/* Ensure JPEG is powered on during reset if currently gated */
1725 	if (adev->jpeg.cur_state == AMD_PG_STATE_GATE) {
1726 		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
1727 						       AMD_PG_STATE_UNGATE);
1728 		pg_state = true;
1729 	}
1730 
1731 	vcn_v4_0_3_reset_jpeg_pre_helper(adev, ring->me);
1732 	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
1733 
1734 	vcn_inst = GET_INST(VCN, ring->me);
1735 	r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst);
1736 
1737 	if (r) {
1738 		DRM_DEV_ERROR(adev->dev, "VCN reset failed: %d\n", r);
1739 		goto restore_pg;
1740 	}
1741 
1742 	/* This flag is not set for VF; RRMT is assumed to be always disabled there */
1743 	if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
1744 		adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
1745 	vcn_v4_0_3_hw_init_inst(vinst);
1746 	vcn_v4_0_3_start_dpg_mode(vinst, adev->vcn.inst[ring->me].indirect_sram);
1747 
1748 	r = amdgpu_ring_reset_helper_end(ring, timedout_fence);
1749 
1750 restore_pg:
1751 	/* Restore the JPEG power gating state if it was originally gated */
1752 	if (pg_state)
1753 		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
1754 						       AMD_PG_STATE_GATE);
1755 	mutex_unlock(&adev->jpeg.jpeg_pg_lock);
1756 	if (!r)
1757 		r = vcn_v4_0_3_reset_jpeg_post_helper(adev, ring->me);
1758 
1769 	mutex_unlock(&vinst->engine_reset_mutex);
1770 
1771 	return r;
1772 }
1773 
1774 static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
1775 	.type = AMDGPU_RING_TYPE_VCN_ENC,
1776 	.align_mask = 0x3f,
1777 	.nop = VCN_ENC_CMD_NO_OP,
1778 	.get_rptr = vcn_v4_0_3_unified_ring_get_rptr,
1779 	.get_wptr = vcn_v4_0_3_unified_ring_get_wptr,
1780 	.set_wptr = vcn_v4_0_3_unified_ring_set_wptr,
1781 	.emit_frame_size =
1782 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1783 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1784 		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
1785 		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
1786 		1, /* vcn_v2_0_enc_ring_insert_end */
1787 	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
1788 	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
1789 	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
1790 	.emit_vm_flush = vcn_v4_0_3_enc_ring_emit_vm_flush,
1791 	.emit_hdp_flush = vcn_v4_0_3_ring_emit_hdp_flush,
1792 	.test_ring = amdgpu_vcn_enc_ring_test_ring,
1793 	.test_ib = amdgpu_vcn_unified_ring_test_ib,
1794 	.insert_nop = amdgpu_ring_insert_nop,
1795 	.insert_end = vcn_v2_0_enc_ring_insert_end,
1796 	.pad_ib = amdgpu_ring_generic_pad_ib,
1797 	.begin_use = amdgpu_vcn_ring_begin_use,
1798 	.end_use = amdgpu_vcn_ring_end_use,
1799 	.emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
1800 	.emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
1801 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1802 	.reset = vcn_v4_0_3_ring_reset,
1803 };
1804 
1805 /**
1806  * vcn_v4_0_3_set_unified_ring_funcs - set unified ring functions
1807  *
1808  * @adev: amdgpu_device pointer
1809  *
1810  * Set unified ring functions
1811  */
1812 static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev)
1813 {
1814 	int i, vcn_inst;
1815 
1816 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1817 		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_3_unified_ring_vm_funcs;
1818 		adev->vcn.inst[i].ring_enc[0].me = i;
1819 		vcn_inst = GET_INST(VCN, i);
1820 		adev->vcn.inst[i].aid_id =
1821 			vcn_inst / adev->vcn.num_inst_per_aid;
1822 	}
1823 }
1824 
1825 /**
1826  * vcn_v4_0_3_is_idle - check VCN block is idle
1827  *
1828  * @ip_block: Pointer to the amdgpu_ip_block structure
1829  *
1830  * Check whether VCN block is idle
1831  */
1832 static bool vcn_v4_0_3_is_idle(struct amdgpu_ip_block *ip_block)
1833 {
1834 	struct amdgpu_device *adev = ip_block->adev;
1835 	int i, ret = 1;
1836 
1837 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1838 		ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) ==
1839 			UVD_STATUS__IDLE);
1840 	}
1841 
1842 	return ret;
1843 }
1844 
1845 /**
1846  * vcn_v4_0_3_wait_for_idle - wait for VCN block idle
1847  *
1848  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
1849  *
1850  * Wait for VCN block idle
1851  */
1852 static int vcn_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
1853 {
1854 	struct amdgpu_device *adev = ip_block->adev;
1855 	int i, ret = 0;
1856 
1857 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1858 		ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS,
1859 					 UVD_STATUS__IDLE, UVD_STATUS__IDLE);
1860 		if (ret)
1861 			return ret;
1862 	}
1863 
1864 	return ret;
1865 }
1866 
1867 /**
 * vcn_v4_0_3_set_clockgating_state - set VCN block clockgating state
1868  *
1869  * @ip_block: amdgpu_ip_block pointer
1870  * @state: clock gating state
1871  *
1872  * Set VCN block clockgating state
1873  */
1874 static int vcn_v4_0_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
1875 					  enum amd_clockgating_state state)
1876 {
1877 	struct amdgpu_device *adev = ip_block->adev;
1878 	bool enable = state == AMD_CG_STATE_GATE;
1879 	int i;
1880 
1881 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1882 		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
1883 
1884 		if (enable) {
1885 			if (RREG32_SOC15(VCN, GET_INST(VCN, i),
1886 					 regUVD_STATUS) != UVD_STATUS__IDLE)
1887 				return -EBUSY;
1888 			vcn_v4_0_3_enable_clock_gating(vinst);
1889 		} else {
1890 			vcn_v4_0_3_disable_clock_gating(vinst);
1891 		}
1892 	}
1893 	return 0;
1894 }
1895 
1896 static int vcn_v4_0_3_set_pg_state(struct amdgpu_vcn_inst *vinst,
1897 				   enum amd_powergating_state state)
1898 {
1899 	struct amdgpu_device *adev = vinst->adev;
1900 	int ret = 0;
1901 
1902 	/* For SRIOV, the guest must not control VCN power-gating;
1903 	 * the MMSCH firmware owns both power-gating and clock-gating,
1904 	 * so the guest should avoid touching the CGC and PG registers.
1905 	 */
1906 	if (amdgpu_sriov_vf(adev)) {
1907 		vinst->cur_state = AMD_PG_STATE_UNGATE;
1908 		return 0;
1909 	}
1910 
1911 	if (state == vinst->cur_state)
1912 		return 0;
1913 
1914 	if (state == AMD_PG_STATE_GATE)
1915 		ret = vcn_v4_0_3_stop(vinst);
1916 	else
1917 		ret = vcn_v4_0_3_start(vinst);
1918 
1919 	if (!ret)
1920 		vinst->cur_state = state;
1921 
1922 	return ret;
1923 }
1924 
1925 /**
1926  * vcn_v4_0_3_set_interrupt_state - set VCN block interrupt state
1927  *
1928  * @adev: amdgpu_device pointer
1929  * @source: interrupt sources
1930  * @type: interrupt types
1931  * @state: interrupt states
1932  *
1933  * Set VCN block interrupt state
1934  */
1935 static int vcn_v4_0_3_set_interrupt_state(struct amdgpu_device *adev,
1936 					struct amdgpu_irq_src *source,
1937 					unsigned int type,
1938 					enum amdgpu_interrupt_state state)
1939 {
1940 	return 0;
1941 }
1942 
1943 /**
1944  * vcn_v4_0_3_process_interrupt - process VCN block interrupt
1945  *
1946  * @adev: amdgpu_device pointer
1947  * @source: interrupt sources
1948  * @entry: interrupt entry from clients and sources
1949  *
1950  * Process VCN block interrupt
1951  */
1952 static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev,
1953 				      struct amdgpu_irq_src *source,
1954 				      struct amdgpu_iv_entry *entry)
1955 {
1956 	uint32_t i, inst;
1957 
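	/* Translate the IH node id into a physical AID, then find the VCN
	 * instance that lives on that AID.
	 */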
1958 	i = node_id_to_phys_map[entry->node_id];
1959 
1960 	DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n");
1961 
1962 	for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst)
1963 		if (adev->vcn.inst[inst].aid_id == i)
1964 			break;
1965 
1966 	if (inst >= adev->vcn.num_vcn_inst) {
1967 		dev_WARN_ONCE(adev->dev, 1,
1968 			      "Interrupt received for unknown VCN instance %d",
1969 			      entry->node_id);
1970 		return 0;
1971 	}
1972 
1973 	switch (entry->src_id) {
1974 	case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
1975 		amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]);
1976 		break;
1977 	default:
1978 		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
1979 			  entry->src_id, entry->src_data[0]);
1980 		break;
1981 	}
1982 
1983 	return 0;
1984 }
1985 
1986 static int vcn_v4_0_3_set_ras_interrupt_state(struct amdgpu_device *adev,
1987 					struct amdgpu_irq_src *source,
1988 					unsigned int type,
1989 					enum amdgpu_interrupt_state state)
1990 {
1991 	return 0;
1992 }
1993 
1994 static const struct amdgpu_irq_src_funcs vcn_v4_0_3_irq_funcs = {
1995 	.set = vcn_v4_0_3_set_interrupt_state,
1996 	.process = vcn_v4_0_3_process_interrupt,
1997 };
1998 
1999 static const struct amdgpu_irq_src_funcs vcn_v4_0_3_ras_irq_funcs = {
2000 	.set = vcn_v4_0_3_set_ras_interrupt_state,
2001 	.process = amdgpu_vcn_process_poison_irq,
2002 };
2003 
2004 /**
2005  * vcn_v4_0_3_set_irq_funcs - set VCN block interrupt irq functions
2006  *
2007  * @adev: amdgpu_device pointer
2008  *
2009  * Set VCN block interrupt irq functions
2010  */
2011 static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
2012 {
2013 	int i;
2014 
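	/* All instances funnel through the first instance's IRQ source;
	 * each VCN instance adds one interrupt type to it.
	 */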
2015 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
2016 		adev->vcn.inst->irq.num_types++;
2018 	adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
2019 
2020 	adev->vcn.inst->ras_poison_irq.num_types = 1;
2021 	adev->vcn.inst->ras_poison_irq.funcs = &vcn_v4_0_3_ras_irq_funcs;
2022 }
2023 
2024 static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
2025 	.name = "vcn_v4_0_3",
2026 	.early_init = vcn_v4_0_3_early_init,
2027 	.late_init = vcn_v4_0_3_late_init,
2028 	.sw_init = vcn_v4_0_3_sw_init,
2029 	.sw_fini = vcn_v4_0_3_sw_fini,
2030 	.hw_init = vcn_v4_0_3_hw_init,
2031 	.hw_fini = vcn_v4_0_3_hw_fini,
2032 	.suspend = vcn_v4_0_3_suspend,
2033 	.resume = vcn_v4_0_3_resume,
2034 	.is_idle = vcn_v4_0_3_is_idle,
2035 	.wait_for_idle = vcn_v4_0_3_wait_for_idle,
2036 	.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
2037 	.set_powergating_state = vcn_set_powergating_state,
2038 	.dump_ip_state = amdgpu_vcn_dump_ip_state,
2039 	.print_ip_state = amdgpu_vcn_print_ip_state,
2040 };
2041 
2042 const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
2043 	.type = AMD_IP_BLOCK_TYPE_VCN,
2044 	.major = 4,
2045 	.minor = 0,
2046 	.rev = 3,
2047 	.funcs = &vcn_v4_0_3_ip_funcs,
2048 };
2049 
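/* Uncorrectable-error status register pairs queried per instance; the
 * VIDD/VIDV suffixes name the two reported error buckets.
 */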
2050 static const struct amdgpu_ras_err_status_reg_entry vcn_v4_0_3_ue_reg_list[] = {
2051 	{AMDGPU_RAS_REG_ENTRY(VCN, 0, regVCN_UE_ERR_STATUS_LO_VIDD, regVCN_UE_ERR_STATUS_HI_VIDD),
2052 	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "VIDD"},
2053 	{AMDGPU_RAS_REG_ENTRY(VCN, 0, regVCN_UE_ERR_STATUS_LO_VIDV, regVCN_UE_ERR_STATUS_HI_VIDV),
2054 	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "VIDV"},
2055 };
2056 
2057 static void vcn_v4_0_3_inst_query_ras_error_count(struct amdgpu_device *adev,
2058 						  uint32_t vcn_inst,
2059 						  void *ras_err_status)
2060 {
2061 	struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status;
2062 
2063 	/* vcn v4_0_3 only supports querying uncorrectable errors */
2064 	amdgpu_ras_inst_query_ras_error_count(adev,
2065 			vcn_v4_0_3_ue_reg_list,
2066 			ARRAY_SIZE(vcn_v4_0_3_ue_reg_list),
2067 			NULL, 0, GET_INST(VCN, vcn_inst),
2068 			AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
2069 			&err_data->ue_count);
2070 }
2071 
2072 static void vcn_v4_0_3_query_ras_error_count(struct amdgpu_device *adev,
2073 					     void *ras_err_status)
2074 {
2075 	uint32_t i;
2076 
2077 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
2078 		dev_warn(adev->dev, "VCN RAS is not supported\n");
2079 		return;
2080 	}
2081 
2082 	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
2083 		vcn_v4_0_3_inst_query_ras_error_count(adev, i, ras_err_status);
2084 }
2085 
2086 static void vcn_v4_0_3_inst_reset_ras_error_count(struct amdgpu_device *adev,
2087 						  uint32_t vcn_inst)
2088 {
2089 	amdgpu_ras_inst_reset_ras_error_count(adev,
2090 					vcn_v4_0_3_ue_reg_list,
2091 					ARRAY_SIZE(vcn_v4_0_3_ue_reg_list),
2092 					GET_INST(VCN, vcn_inst));
2093 }
2094 
2095 static void vcn_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev)
2096 {
2097 	uint32_t i;
2098 
2099 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
2100 		dev_warn(adev->dev, "VCN RAS is not supported\n");
2101 		return;
2102 	}
2103 
2104 	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
2105 		vcn_v4_0_3_inst_reset_ras_error_count(adev, i);
2106 }
2107 
2108 static uint32_t vcn_v4_0_3_query_poison_by_instance(struct amdgpu_device *adev,
2109 			uint32_t instance, uint32_t sub_block)
2110 {
2111 	uint32_t poison_stat = 0, reg_value = 0;
2112 
2113 	switch (sub_block) {
2114 	case AMDGPU_VCN_V4_0_3_VCPU_VCODEC:
2115 		reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
2116 		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
2117 		break;
2118 	default:
2119 		break;
2120 	}
2121 
2122 	if (poison_stat)
2123 		dev_info(adev->dev, "Poison detected in VCN%d, sub_block %d\n",
2124 			instance, sub_block);
2125 
2126 	return poison_stat;
2127 }
2128 
2129 static bool vcn_v4_0_3_query_poison_status(struct amdgpu_device *adev)
2130 {
2131 	uint32_t inst, sub;
2132 	uint32_t poison_stat = 0;
2133 
2134 	for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
2135 		for (sub = 0; sub < AMDGPU_VCN_V4_0_3_MAX_SUB_BLOCK; sub++)
2136 			poison_stat +=
2137 			vcn_v4_0_3_query_poison_by_instance(adev, inst, sub);
2138 
2139 	return !!poison_stat;
2140 }
2141 
2142 static const struct amdgpu_ras_block_hw_ops vcn_v4_0_3_ras_hw_ops = {
2143 	.query_ras_error_count = vcn_v4_0_3_query_ras_error_count,
2144 	.reset_ras_error_count = vcn_v4_0_3_reset_ras_error_count,
2145 	.query_poison_status = vcn_v4_0_3_query_poison_status,
2146 };
2147 
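/*
 * ACA bank handling: the parser logs one error per bank for UEs and uses
 * the MISC0 error count for CEs; the validity callback accepts only banks
 * whose SMU instance and error code identify them as VCN errors.
 */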
2148 static int vcn_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
2149 				      enum aca_smu_type type, void *data)
2150 {
2151 	struct aca_bank_info info;
2152 	u64 misc0;
2153 	int ret;
2154 
2155 	ret = aca_bank_info_decode(bank, &info);
2156 	if (ret)
2157 		return ret;
2158 
2159 	misc0 = bank->regs[ACA_REG_IDX_MISC0];
2160 	switch (type) {
2161 	case ACA_SMU_TYPE_UE:
2162 		bank->aca_err_type = ACA_ERROR_TYPE_UE;
2163 		ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
2164 						     1ULL);
2165 		break;
2166 	case ACA_SMU_TYPE_CE:
2167 		bank->aca_err_type = ACA_ERROR_TYPE_CE;
2168 		ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
2169 						     ACA_REG__MISC0__ERRCNT(misc0));
2170 		break;
2171 	default:
2172 		return -EINVAL;
2173 	}
2174 
2175 	return ret;
2176 }
2177 
2178 /* error code values come from the SMU driver interface (if) header file */
2179 static int vcn_v4_0_3_err_codes[] = {
2180 	14, 15, /* VCN */
2181 };
2182 
2183 static bool vcn_v4_0_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
2184 					 enum aca_smu_type type, void *data)
2185 {
2186 	u32 instlo;
2187 
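	/* Bit 0 of INSTANCEIDLO is masked off before comparing against the
	 * AID0 SMU instance id; only matching banks are treated as VCN's.
	 */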
2188 	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
2189 	instlo &= GENMASK(31, 1);
2190 
2191 	if (instlo != mmSMNAID_AID0_MCA_SMU)
2192 		return false;
2193 
2194 	if (aca_bank_check_error_codes(handle->adev, bank,
2195 				       vcn_v4_0_3_err_codes,
2196 				       ARRAY_SIZE(vcn_v4_0_3_err_codes)))
2197 		return false;
2198 
2199 	return true;
2200 }
2201 
2202 static const struct aca_bank_ops vcn_v4_0_3_aca_bank_ops = {
2203 	.aca_bank_parser = vcn_v4_0_3_aca_bank_parser,
2204 	.aca_bank_is_valid = vcn_v4_0_3_aca_bank_is_valid,
2205 };
2206 
2207 static const struct aca_info vcn_v4_0_3_aca_info = {
2208 	.hwip = ACA_HWIP_TYPE_SMU,
2209 	.mask = ACA_ERROR_UE_MASK,
2210 	.bank_ops = &vcn_v4_0_3_aca_bank_ops,
2211 };
2212 
2213 static int vcn_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
2214 {
2215 	int r;
2216 
2217 	r = amdgpu_ras_block_late_init(adev, ras_block);
2218 	if (r)
2219 		return r;
2220 
2221 	if (amdgpu_ras_is_supported(adev, ras_block->block) &&
2222 		adev->vcn.inst->ras_poison_irq.funcs) {
2223 		r = amdgpu_irq_get(adev, &adev->vcn.inst->ras_poison_irq, 0);
2224 		if (r)
2225 			goto late_fini;
2226 	}
2227 
2228 	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN,
2229 				&vcn_v4_0_3_aca_info, NULL);
2230 	if (r)
2231 		goto late_fini;
2232 
2233 	return 0;
2234 
2235 late_fini:
2236 	amdgpu_ras_block_late_fini(adev, ras_block);
2237 
2238 	return r;
2239 }
2240 
2241 static struct amdgpu_vcn_ras vcn_v4_0_3_ras = {
2242 	.ras_block = {
2243 		.hw_ops = &vcn_v4_0_3_ras_hw_ops,
2244 		.ras_late_init = vcn_v4_0_3_ras_late_init,
2245 	},
2246 };
2247 
2248 static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev)
2249 {
2250 	adev->vcn.ras = &vcn_v4_0_3_ras;
2251 }
2252 
2253 static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev,
2254 				  int inst_idx, bool indirect)
2255 {
2256 	uint32_t tmp;
2257 
2258 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
2259 		return;
2260 
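	/* Arm VCPU/VCODEC RAS reporting: rearm/IH/PMI/stall controls first,
	 * then the corresponding VCPU and system interrupt enables.  All
	 * writes honor the DPG direct/indirect programming mode.
	 */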
2261 	tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK |
2262 	      VCN_RAS_CNTL__VCPU_VCODEC_IH_EN_MASK |
2263 	      VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN_MASK |
2264 	      VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN_MASK;
2265 	WREG32_SOC15_DPG_MODE(inst_idx,
2266 			      SOC15_DPG_MODE_OFFSET(VCN, 0, regVCN_RAS_CNTL),
2267 			      tmp, 0, indirect);
2268 
2269 	tmp = UVD_VCPU_INT_EN2__RASCNTL_VCPU_VCODEC_EN_MASK;
2270 	WREG32_SOC15_DPG_MODE(inst_idx,
2271 			      SOC15_DPG_MODE_OFFSET(VCN, 0, regUVD_VCPU_INT_EN2),
2272 			      tmp, 0, indirect);
2273 
2274 	tmp = UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
2275 	WREG32_SOC15_DPG_MODE(inst_idx,
2276 			      SOC15_DPG_MODE_OFFSET(VCN, 0, regUVD_SYS_INT_EN),
2277 			      tmp, 0, indirect);
2278 }
2279