/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
#include "mmsch_v4_0.h"
#include "vcn_v4_0.h"

#include "vcn/vcn_4_0_0_offset.h"
#include "vcn/vcn_4_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"

#include <drm/drm_drv.h>

#define mmUVD_DPG_LMA_CTL							regUVD_DPG_LMA_CTL
#define mmUVD_DPG_LMA_CTL_BASE_IDX						regUVD_DPG_LMA_CTL_BASE_IDX
#define mmUVD_DPG_LMA_DATA							regUVD_DPG_LMA_DATA
#define mmUVD_DPG_LMA_DATA_BASE_IDX						regUVD_DPG_LMA_DATA_BASE_IDX

#define VCN_VID_SOC_ADDRESS_2_0							0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0						0x48300
#define VCN1_AON_SOC_ADDRESS_3_0						0x48000

#define VCN_HARVEST_MMSCH								0

#define RDECODE_MSG_CREATE							0x00000000
#define RDECODE_MESSAGE_CREATE							0x00000001

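/*
 * Registers captured for the VCN IP dump (the buffer is allocated in
 * vcn_v4_0_sw_init() with one copy of this list per instance).
 */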
static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0[] = {
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_CONFIG),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
};

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

static int vcn_v4_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
				 enum amd_powergating_state state);
static int vcn_v4_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				   struct dpg_pause_state *new_state);
static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev);

/**
 * vcn_v4_0_early_init - set function pointers and load microcode
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v4_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r;

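	/*
	 * Under SR-IOV the hypervisor (via MMSCH) decides which VCN
	 * instances this VF may use; treat the disabled ones as harvested.
	 */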
	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.harvest_config = VCN_HARVEST_MMSCH;
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
				adev->vcn.harvest_config |= 1 << i;
				dev_info(adev->dev, "VCN%d is disabled by hypervisor\n", i);
			}
		}
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
		/* re-use enc ring as unified ring */
		adev->vcn.inst[i].num_enc_rings = 1;

	vcn_v4_0_set_unified_ring_funcs(adev);
	vcn_v4_0_set_irq_funcs(adev);
	vcn_v4_0_set_ras_funcs(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		adev->vcn.inst[i].set_pg_state = vcn_v4_0_set_pg_state;

		r = amdgpu_vcn_early_init(adev, i);
		if (r)
			return r;
	}

	return 0;
}

static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;

	fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
	fw_shared->sq.is_enabled = 1;

	fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);
	fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
		AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;

	if (amdgpu_ip_version(adev, VCN_HWIP, 0) ==
	    IP_VERSION(4, 0, 2)) {
		fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT;
		fw_shared->drm_key_wa.method =
			AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
	}

	if (amdgpu_vcnfw_log)
		amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);

	return 0;
}

/**
 * vcn_v4_0_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = ip_block->adev;
	int i, r;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
	uint32_t *ptr;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_vcn_sw_init(adev, i);
		if (r)
			return r;

		amdgpu_vcn_setup_ucode(adev, i);

		r = amdgpu_vcn_resume(adev, i);
		if (r)
			return r;

		/* Init instance 0 sched_score to 1, so it's scheduled after other instances */
		if (i == 0)
			atomic_set(&adev->vcn.inst[i].sched_score, 1);
		else
			atomic_set(&adev->vcn.inst[i].sched_score, 0);

		/* VCN UNIFIED TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		/* VCN POISON TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].ras_poison_irq);
		if (r)
			return r;

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->use_doorbell = true;
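		/*
		 * The unified ring's doorbell layout differs between SR-IOV
		 * and bare metal: with vcn_ring0_1 == D, bare-metal instance i
		 * gets doorbell 2*D + 2 + 8*i (an 8-slot stride per instance),
		 * while under SR-IOV the stride is derived from the number of
		 * rings exposed per instance.
		 */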
		if (amdgpu_sriov_vf(adev))
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i *
				(adev->vcn.inst[i].num_enc_rings + 1) + 1;
		else
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
		ring->vm_hub = AMDGPU_MMHUB0(0);
		sprintf(ring->name, "vcn_unified_%d", i);

		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
						AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		vcn_v4_0_fw_shared_init(adev, i);

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
			adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
	}

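	/*
	 * Per-queue (ring) reset is only advertised on bare metal; under
	 * SR-IOV the VF is limited to whatever the soft/full reset mask of
	 * the first unified ring reports.
	 */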
	adev->vcn.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
	if (!amdgpu_sriov_vf(adev))
		adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	r = amdgpu_vcn_ras_sw_init(adev);
	if (r)
		return r;

	/* Allocate memory for VCN IP Dump buffer */
	ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
		adev->vcn.ip_dump = NULL;
	} else {
		adev->vcn.ip_dump = ptr;
	}

	r = amdgpu_vcn_sysfs_reset_mask_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * vcn_v4_0_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r, idx;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_vcn4_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = 0;
		}

		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(adev, i);
		if (r)
			return r;
	}

	amdgpu_vcn_sysfs_reset_mask_fini(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_sw_fini(adev, i);
		if (r)
			return r;
	}

	kfree(adev->vcn.ip_dump);

	return 0;
}

/**
 * vcn_v4_0_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v4_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, r;

	if (amdgpu_sriov_vf(adev)) {
		r = vcn_v4_0_start_sriov(adev);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_enc[0];
			ring->wptr = 0;
			ring->wptr_old = 0;
			vcn_v4_0_unified_ring_set_wptr(ring);
			ring->sched.ready = true;
		}
	} else {
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_enc[0];

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
					((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * vcn_v4_0_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		cancel_delayed_work_sync(&vinst->idle_work);

		if (!amdgpu_sriov_vf(adev)) {
			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
			    (vinst->cur_state != AMD_PG_STATE_GATE &&
			     RREG32_SOC15(VCN, i, regUVD_STATUS))) {
				vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
			}
		}
		if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
			amdgpu_irq_put(adev, &vinst->ras_poison_irq, 0);
	}

	return 0;
}

/**
 * vcn_v4_0_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v4_0_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	r = vcn_v4_0_hw_fini(ip_block);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(ip_block->adev, i);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v4_0_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v4_0_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_resume(ip_block->adev, i);
		if (r)
			return r;
	}

	r = vcn_v4_0_hw_init(ip_block);

	return r;
}

/**
 * vcn_v4_0_mc_resume - memory controller programming
 *
 * @vinst: VCN instance
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v4_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
}

/**
 * vcn_v4_0_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @vinst: VCN instance
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
					bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0),
			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
			adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v4_0_disable_static_power_gating - disable VCN static power gating
 *
 * @vinst: VCN instance
 *
 * Disable static power gating for VCN block
 */
static void vcn_v4_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data = 0;

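	/*
	 * Program the power-gating FSM to bring the VCN sub-blocks back up,
	 * then poll UVD_PGFSM_STATUS until the request has taken effect.
	 */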
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, inst, regUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0, 0x3F3FFFFF);
	} else {
		uint32_t value;

		value = (inst) ? 0x2200800 : 0;
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, inst, regUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_PGFSM_STATUS, value, 0x3F3FFFFF);
	}

	data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);
}

/**
 * vcn_v4_0_enable_static_power_gating - enable VCN static power gating
 *
 * @vinst: VCN instance
 *
 * Enable static power gating for VCN block
 */
static void vcn_v4_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, regUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDS_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTA_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDLM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDAB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_PGFSM_STATUS, data, 0x3F3FFFFF);
	}
}

/**
 * vcn_v4_0_disable_clock_gating - disable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Disable clock gating for VCN block
 */
static void vcn_v4_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* VCN disable CGC */
	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, inst, regUVD_CGC_GATE, data);
	SOC15_WAIT_ON_RREG(VCN, inst, regUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v4_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
 *
 * @vinst: VCN instance
 * @sram_sel: sram select
 * @indirect: indirectly write sram
 *
 * Disable clock gating for VCN block with dpg mode
 */
static void vcn_v4_0_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
						   uint8_t sram_sel,
						   uint8_t indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t reg_data = 0;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* enable sw clock gating control */
	reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v4_0_enable_clock_gating - enable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Enable clock gating for VCN block
 */
static void vcn_v4_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data);
}

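/*
 * Route VCPU/VCODEC RAS events to the host: arm the RAS controls and the
 * matching system interrupt enable, using the DPG indirect write path when
 * requested.
 */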
static void vcn_v4_0_enable_ras(struct amdgpu_vcn_inst *vinst,
				bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t tmp;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
		return;

	tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_IH_EN_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, regVCN_RAS_CNTL),
			      tmp, 0, indirect);

	tmp = UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, regUVD_SYS_INT_EN),
			      tmp, 0, indirect);
}

/**
 * vcn_v4_0_start_dpg_mode - VCN start with dpg mode
 *
 * @vinst: VCN instance
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int ret;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp);

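	/*
	 * When @indirect is set, the WREG32_SOC15_DPG_MODE() writes below are
	 * staged in the DPG scratch SRAM buffer and committed in one batch
	 * via amdgpu_vcn_psp_update_sram() further down in this function.
	 */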
	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* disable clock gating */
	vcn_v4_0_disable_clock_gating_dpg_mode(vinst, 0, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN), 0, 0, indirect);

	/* setup regUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v4_0_mc_resume_dpg_mode(vinst, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable LMI MC and UMC channels */
	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, indirect);

	vcn_v4_0_enable_ras(vinst, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect) {
		ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
		if (ret) {
			dev_err(adev->dev, "vcn sram load failed %d\n", ret);
			return ret;
		}
	}

	ring = &adev->vcn.inst[inst_idx].ring_enc[0];

	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / 4);

	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0);

	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, tmp);
	ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	WREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL,
			ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
			VCN_RB1_DB_CTRL__EN_MASK);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);

	return 0;
}

/**
 * vcn_v4_0_start - VCN start
 *
 * @vinst: VCN instance
 *
 * Start VCN block
 */
static int vcn_v4_0_start(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int j, k, r;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, true, i);

	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return vcn_v4_0_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);

	/* disable VCN power gating */
	vcn_v4_0_disable_static_power_gating(vinst);

	/* set VCN status busy */
	tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);

	/* SW clock gating */
	vcn_v4_0_disable_clock_gating(vinst);

	/* enable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
		 UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* disable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* enable LMI MC and UMC channels */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

	/* setup regUVD_LMI_CTRL */
	tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
	WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
		     UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

	/* setup regUVD_MPC_CNTL */
	tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);

	/* setup UVD_MPC_SET_MUXA0 */
	WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
		     ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		      (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		      (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	/* setup UVD_MPC_SET_MUXB0 */
	WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
		     ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		      (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		      (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	/* setup UVD_MPC_SET_MUX */
	WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
		     ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		      (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v4_0_mc_resume(vinst);

	/* VCN global tiling registers */
	WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);

	/* unblock VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	/* release VCPU reset to boot */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
		 ~UVD_VCPU_CNTL__BLK_RST_MASK);

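	/*
	 * Poll UVD_STATUS for the VCPU boot-complete bit; if it does not
	 * appear, pulse the VCPU block reset and retry, up to 10 attempts.
	 */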
	for (j = 0; j < 10; ++j) {
		uint32_t status;

		for (k = 0; k < 100; ++k) {
			status = RREG32_SOC15(VCN, i, regUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
			if (amdgpu_emu_mode == 1)
				msleep(1);
		}

		if (amdgpu_emu_mode == 1) {
			r = -1;
			if (status & 2) {
				r = 0;
				break;
			}
		} else {
			r = 0;
			if (status & 2)
				break;

			dev_err(adev->dev, "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
				 UVD_VCPU_CNTL__BLK_RST_MASK,
				 ~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
				 ~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}
	}

	if (r) {
		dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
		 UVD_MASTINT_EN__VCPU_EN_MASK,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* clear the busy bit of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
		 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	ring = &adev->vcn.inst[i].ring_enc[0];
	WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
		     ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
		     VCN_RB1_DB_CTRL__EN_MASK);

	WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);

	tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
	WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
	WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);

	tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
	WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
	ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
	WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, i, regUVD_STATUS);

	return 0;
}

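/*
 * For SR-IOV ring-buffer decoupling, a metadata block is placed immediately
 * after the ring contents; the MMSCH/firmware presumably uses it to identify
 * the ring (version, ring_id) across world switches.
 */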
static int vcn_v4_0_init_ring_metadata(struct amdgpu_device *adev, uint32_t vcn_inst, struct amdgpu_ring *ring_enc)
{
	struct amdgpu_vcn_rb_metadata *rb_metadata = NULL;
	uint8_t *rb_ptr = (uint8_t *)ring_enc->ring;

	rb_ptr += ring_enc->ring_size;
	rb_metadata = (struct amdgpu_vcn_rb_metadata *)rb_ptr;

	memset(rb_metadata, 0, sizeof(struct amdgpu_vcn_rb_metadata));
	rb_metadata->size = sizeof(struct amdgpu_vcn_rb_metadata);
	rb_metadata->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
	rb_metadata->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_DECOUPLE_FLAG);
	rb_metadata->version = 1;
	rb_metadata->ring_id = vcn_inst & 0xFF;

	return 0;
}

static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ring *ring_enc;
	uint64_t cache_addr;
	uint64_t rb_enc_addr;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t offset, cache_size;
	uint32_t tmp, timeout;

	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw;
	uint32_t init_status;
	uint32_t enabled_vcn;

	struct mmsch_v4_0_cmd_direct_write
		direct_wt = { {0} };
	struct mmsch_v4_0_cmd_direct_read_modify_write
		direct_rd_mod_wt = { {0} };
	struct mmsch_v4_0_cmd_end end = { {0} };
	struct mmsch_v4_0_init_header header;

	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	volatile struct amdgpu_fw_shared_rb_setup *rb_setup;

	direct_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type =
		MMSCH_COMMAND__END;

	header.version = MMSCH_VERSION;
	header.total_size = sizeof(struct mmsch_v4_0_init_header) >> 2;
	for (i = 0; i < MMSCH_V4_0_VCN_INSTANCES; i++) {
		header.inst[i].init_status = 0;
		header.inst[i].table_offset = 0;
		header.inst[i].table_size = 0;
	}

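	/*
	 * Build one init table per VCN instance directly after the header:
	 * each table is a list of MMSCH direct-write/read-modify-write
	 * packets that the MMSCH replays to program that instance.
	 */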
1387 	table_loc = (uint32_t *)table->cpu_addr;
1388 	table_loc += header.total_size;
1389 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1390 		if (adev->vcn.harvest_config & (1 << i))
1391 			continue;
1392 
1393 		// Must re/init fw_shared at beginning
1394 		vcn_v4_0_fw_shared_init(adev, i);
1395 
1396 		table_size = 0;
1397 
1398 		MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i,
1399 			regUVD_STATUS),
1400 			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
1401 
1402 		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
1403 
1404 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1405 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1406 				regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1407 				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
1408 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1409 				regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1410 				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
1411 			offset = 0;
1412 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1413 				regUVD_VCPU_CACHE_OFFSET0),
1414 				0);
1415 		} else {
1416 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1417 				regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1418 				lower_32_bits(adev->vcn.inst[i].gpu_addr));
1419 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1420 				regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1421 				upper_32_bits(adev->vcn.inst[i].gpu_addr));
1422 			offset = cache_size;
1423 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1424 				regUVD_VCPU_CACHE_OFFSET0),
1425 				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
1426 		}
1427 
1428 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1429 			regUVD_VCPU_CACHE_SIZE0),
1430 			cache_size);
1431 
1432 		cache_addr = adev->vcn.inst[i].gpu_addr + offset;
1433 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1434 			regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
1435 			lower_32_bits(cache_addr));
1436 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1437 			regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
1438 			upper_32_bits(cache_addr));
1439 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1440 			regUVD_VCPU_CACHE_OFFSET1),
1441 			0);
1442 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1443 			regUVD_VCPU_CACHE_SIZE1),
1444 			AMDGPU_VCN_STACK_SIZE);
1445 
1446 		cache_addr = adev->vcn.inst[i].gpu_addr + offset +
1447 			AMDGPU_VCN_STACK_SIZE;
1448 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1449 			regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
1450 			lower_32_bits(cache_addr));
1451 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1452 			regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
1453 			upper_32_bits(cache_addr));
1454 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1455 			regUVD_VCPU_CACHE_OFFSET2),
1456 			0);
1457 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1458 			regUVD_VCPU_CACHE_SIZE2),
1459 			AMDGPU_VCN_CONTEXT_SIZE);
1460 
1461 		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
1462 		rb_setup = &fw_shared->rb_setup;
1463 
1464 		ring_enc = &adev->vcn.inst[i].ring_enc[0];
1465 		ring_enc->wptr = 0;
1466 		rb_enc_addr = ring_enc->gpu_addr;
1467 
1468 		rb_setup->is_rb_enabled_flags |= RB_ENABLED;
1469 		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
1470 
1471 		if (amdgpu_sriov_is_vcn_rb_decouple(adev)) {
1472 			vcn_v4_0_init_ring_metadata(adev, i, ring_enc);
1473 
1474 			memset((void *)&rb_setup->rb_info, 0, sizeof(struct amdgpu_vcn_rb_setup_info) * MAX_NUM_VCN_RB_SETUP);
1475 			if (!(adev->vcn.harvest_config & (1 << 0))) {
1476 				rb_setup->rb_info[0].rb_addr_lo = lower_32_bits(adev->vcn.inst[0].ring_enc[0].gpu_addr);
1477 				rb_setup->rb_info[0].rb_addr_hi = upper_32_bits(adev->vcn.inst[0].ring_enc[0].gpu_addr);
1478 				rb_setup->rb_info[0].rb_size = adev->vcn.inst[0].ring_enc[0].ring_size / 4;
1479 			}
1480 			if (!(adev->vcn.harvest_config & (1 << 1))) {
1481 				rb_setup->rb_info[2].rb_addr_lo = lower_32_bits(adev->vcn.inst[1].ring_enc[0].gpu_addr);
1482 				rb_setup->rb_info[2].rb_addr_hi = upper_32_bits(adev->vcn.inst[1].ring_enc[0].gpu_addr);
1483 				rb_setup->rb_info[2].rb_size = adev->vcn.inst[1].ring_enc[0].ring_size / 4;
1484 			}
1485 			fw_shared->decouple.is_enabled = 1;
1486 			fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_DECOUPLE_FLAG);
1487 		} else {
1488 			rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
1489 			rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
1490 			rb_setup->rb_size = ring_enc->ring_size / 4;
1491 		}
1492 
1493 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1494 			regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
1495 			lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
1496 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1497 			regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
1498 			upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
1499 		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1500 			regUVD_VCPU_NONCACHE_SIZE0),
1501 			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
1502 
1503 		/* add end packet */
1504 		MMSCH_V4_0_INSERT_END();
1505 
1506 		/* refine header */
1507 		header.inst[i].init_status = 0;
1508 		header.inst[i].table_offset = header.total_size;
1509 		header.inst[i].table_size = table_size;
1510 		header.total_size += table_size;
1511 	}
1512 
1513 	/* Update init table header in memory */
1514 	size = sizeof(struct mmsch_v4_0_init_header);
1515 	table_loc = (uint32_t *)table->cpu_addr;
1516 	memcpy((void *)table_loc, &header, size);
1517 
1518 	/* Message MMSCH (in VCN[0]) to initialize this client
1519 	 * 1, write the GPU MC address of the memory descriptor location
1520 	 * to the mmsch_vf_ctx_addr_lo/hi registers
1521 	 */
1522 	ctx_addr = table->gpu_addr;
1523 	WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
1524 	WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
1525 
1526 	/* 2, update vmid of descriptor */
1527 	tmp = RREG32_SOC15(VCN, 0, regMMSCH_VF_VMID);
1528 	tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
1529 	/* use domain0 for MM scheduler */
1530 	tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
1531 	WREG32_SOC15(VCN, 0, regMMSCH_VF_VMID, tmp);
1532 
1533 	/* 3, notify mmsch about the size of this descriptor */
1534 	size = header.total_size;
1535 	WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE, size);
1536 
1537 	/* 4, set resp to zero */
1538 	WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP, 0);
1539 
1540 	/* 5, kick off the initialization and wait until
1541 	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
1542 	 */
1543 	param = 0x00000001;
1544 	WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_HOST, param);
1545 	tmp = 0;
1546 	timeout = 1000;
1547 	resp = 0;
1548 	expected = MMSCH_VF_MAILBOX_RESP__OK;
1549 	while (resp != expected) {
1550 		resp = RREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP);
1551 		if (resp != 0)
1552 			break;
1553 
1554 		udelay(10);
1555 		tmp = tmp + 10;
1556 		if (tmp >= timeout) {
1557 			DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
1558 				" waiting for regMMSCH_VF_MAILBOX_RESP "\
1559 				"(expected=0x%08x, readback=0x%08x)\n",
1560 				tmp, expected, resp);
1561 			return -EBUSY;
1562 		}
1563 	}
1564 	enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0;
1565 	init_status = ((struct mmsch_v4_0_init_header *)(table_loc))->inst[enabled_vcn].init_status;
1566 	if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
1567 	&& init_status != MMSCH_VF_ENGINE_STATUS__PASS)
1568 		DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\
1569 			"status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status);
1570 
1571 	return 0;
1572 }
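
/*
 * Illustrative sketch, not driver code: the mailbox handshake above boils
 * down to this bounded poll. The helper name and the fixed 10 us step are
 * assumptions local to this example; the register and the OK code are the
 * ones used above.
 */
static int __maybe_unused vcn_v4_0_example_poll_mmsch_resp(struct amdgpu_device *adev)
{
	uint32_t resp, waited_us = 0;

	do {
		/* MMSCH writes a non-zero response code when it is done */
		resp = RREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP);
		if (resp)
			return resp == MMSCH_VF_MAILBOX_RESP__OK ? 0 : -EINVAL;
		udelay(10);
		waited_us += 10;
	} while (waited_us < 1000);

	return -EBUSY; /* same timeout behaviour as the loop above */
}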
1573 
1574 /**
1575  * vcn_v4_0_stop_dpg_mode - VCN stop with dpg mode
1576  *
1577  * @vinst: VCN instance
1578  *
1579  * Stop VCN block with dpg mode
1580  */
1581 static void vcn_v4_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
1582 {
1583 	struct amdgpu_device *adev = vinst->adev;
1584 	int inst_idx = vinst->inst;
1585 	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
1586 	uint32_t tmp;
1587 
1588 	vcn_v4_0_pause_dpg_mode(vinst, &state);
1589 	/* Wait for power status to be 1 */
1590 	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
1591 		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1592 
1593 	/* wait for read ptr to be equal to write ptr */
1594 	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
1595 	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);
1596 
1597 	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
1598 		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1599 
1600 	/* disable dynamic power gating mode */
1601 	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
1602 		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
1603 
1604 	/* Keep one read-back to ensure all register writes are posted;
1605 	 * otherwise later accesses may race with the outstanding writes.
1606 	 */
1607 	RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
1608 }
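
/*
 * Sketch of the posting-read idiom used at the end of the stop paths in
 * this file: under the assumption that a read from the same register
 * block flushes earlier posted writes, one read-back is enough. The
 * helper below is hypothetical.
 */
static void __maybe_unused vcn_v4_0_example_post_writes(struct amdgpu_device *adev,
							int inst_idx)
{
	/* ... posted register writes would go here ... */
	(void)RREG32_SOC15(VCN, inst_idx, regUVD_STATUS); /* flush them */
}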
1609 
1610 /**
1611  * vcn_v4_0_stop - VCN stop
1612  *
1613  * @vinst: VCN instance
1614  *
1615  * Stop VCN block
1616  */
1617 static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst)
1618 {
1619 	struct amdgpu_device *adev = vinst->adev;
1620 	int i = vinst->inst;
1621 	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
1622 	uint32_t tmp;
1623 	int r = 0;
1624 
1625 	if (adev->vcn.harvest_config & (1 << i))
1626 		return 0;
1627 
1628 	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
1629 	fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
1630 
1631 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1632 		vcn_v4_0_stop_dpg_mode(vinst);
1633 		r = 0;
1634 		goto done;
1635 	}
1636 
1637 	/* wait for vcn idle */
1638 	r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
1639 	if (r)
1640 		goto done;
1641 
1642 	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
1643 		UVD_LMI_STATUS__READ_CLEAN_MASK |
1644 		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
1645 		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
1646 	r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
1647 	if (r)
1648 		goto done;
1649 
1650 	/* disable LMI UMC channel */
1651 	tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
1652 	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
1653 	WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
1654 	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
1655 		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
1656 	r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
1657 	if (r)
1658 		goto done;
1659 
1660 	/* block VCPU register access */
1661 	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
1662 		 UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
1663 		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
1664 
1665 	/* reset VCPU */
1666 	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
1667 		 UVD_VCPU_CNTL__BLK_RST_MASK,
1668 		 ~UVD_VCPU_CNTL__BLK_RST_MASK);
1669 
1670 	/* disable VCPU clock */
1671 	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
1672 		 ~(UVD_VCPU_CNTL__CLK_EN_MASK));
1673 
1674 	/* apply soft reset */
1675 	tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
1676 	tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
1677 	WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
1678 	tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
1679 	tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
1680 	WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
1681 
1682 	/* clear status */
1683 	WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
1684 
1685 	/* apply HW clock gating */
1686 	vcn_v4_0_enable_clock_gating(vinst);
1687 
1688 	/* enable VCN power gating */
1689 	vcn_v4_0_enable_static_power_gating(vinst);
1690 
1691 	/* Keep one read-back to ensure all register writes are posted;
1692 	 * otherwise later accesses may race with the outstanding writes.
1693 	 */
1694 	RREG32_SOC15(VCN, i, regUVD_STATUS);
1695 
1696 done:
1697 	if (adev->pm.dpm_enabled)
1698 		amdgpu_dpm_enable_vcn(adev, false, i);
1699 
1700 	return 0;
1701 }
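
/*
 * Minimal sketch of the stall-then-drain pattern vcn_v4_0_stop() applies
 * to the LMI UMC channel: assert the stall bit, then poll until the raw
 * clean bits report that outstanding traffic has drained. The helper name
 * is an assumption; the masks and accessors are the ones used above.
 */
static int __maybe_unused vcn_v4_0_example_drain_umc(struct amdgpu_device *adev, int i)
{
	uint32_t tmp;

	tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;	/* stall the arbiter */
	WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);

	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
	      UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	return SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
}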
1702 
1703 /**
1704  * vcn_v4_0_pause_dpg_mode - VCN pause with dpg mode
1705  *
1706  * @vinst: VCN instance
1707  * @new_state: pause state
1708  *
1709  * Pause dpg mode for VCN block
1710  */
1711 static int vcn_v4_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
1712 				   struct dpg_pause_state *new_state)
1713 {
1714 	struct amdgpu_device *adev = vinst->adev;
1715 	int inst_idx = vinst->inst;
1716 	uint32_t reg_data = 0;
1717 	int ret_code;
1718 
1719 	/* pause/unpause if state is changed */
1720 	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
1721 		DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d",
1722 			adev->vcn.inst[inst_idx].pause_state.fw_based,	new_state->fw_based);
1723 		reg_data = RREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE) &
1724 			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1725 
1726 		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
1727 			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 0x1,
1728 				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1729 
1730 			if (!ret_code) {
1731 				/* pause DPG */
1732 				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1733 				WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
1734 
1735 				/* wait for ACK */
1736 				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_DPG_PAUSE,
1737 					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
1738 					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1739 
1740 				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS,
1741 					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1742 			}
1743 		} else {
1744 			/* unpause dpg, no need to wait */
1745 			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1746 			WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
1747 		}
1748 		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
1749 	}
1750 
1751 	return 0;
1752 }
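
/*
 * Usage sketch: callers drive the DPG pause state machine by filling in a
 * dpg_pause_state; only a change of fw_based generates register traffic.
 * The wrapper below is hypothetical.
 */
static void __maybe_unused vcn_v4_0_example_set_pause(struct amdgpu_vcn_inst *vinst,
						      bool pause)
{
	struct dpg_pause_state state = {
		.fw_based = pause ? VCN_DPG_STATE__PAUSE : VCN_DPG_STATE__UNPAUSE,
	};

	vcn_v4_0_pause_dpg_mode(vinst, &state);
}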
1753 
1754 /**
1755  * vcn_v4_0_unified_ring_get_rptr - get unified read pointer
1756  *
1757  * @ring: amdgpu_ring pointer
1758  *
1759  * Returns the current hardware unified read pointer
1760  */
1761 static uint64_t vcn_v4_0_unified_ring_get_rptr(struct amdgpu_ring *ring)
1762 {
1763 	struct amdgpu_device *adev = ring->adev;
1764 
1765 	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
1766 		DRM_ERROR("wrong ring id in %s", __func__);
1767 
1768 	return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR);
1769 }
1770 
1771 /**
1772  * vcn_v4_0_unified_ring_get_wptr - get unified write pointer
1773  *
1774  * @ring: amdgpu_ring pointer
1775  *
1776  * Returns the current hardware unified write pointer
1777  */
1778 static uint64_t vcn_v4_0_unified_ring_get_wptr(struct amdgpu_ring *ring)
1779 {
1780 	struct amdgpu_device *adev = ring->adev;
1781 
1782 	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
1783 		DRM_ERROR("wrong ring id in %s", __func__);
1784 
1785 	if (ring->use_doorbell)
1786 		return *ring->wptr_cpu_addr;
1787 	else
1788 		return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR);
1789 }
1790 
1791 /**
1792  * vcn_v4_0_unified_ring_set_wptr - set enc write pointer
1793  *
1794  * @ring: amdgpu_ring pointer
1795  *
1796  * Commits the enc write pointer to the hardware
1797  */
1798 static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
1799 {
1800 	struct amdgpu_device *adev = ring->adev;
1801 
1802 	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
1803 		DRM_ERROR("wrong ring id in %s", __func__);
1804 
1805 	if (ring->use_doorbell) {
1806 		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1807 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1808 	} else {
1809 		WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr));
1810 	}
1811 }
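
/*
 * Sketch of the ordering the doorbell path above relies on: the shadow
 * write pointer in system memory is updated before the doorbell is rung,
 * so the engine never fetches a stale value. The helper name is
 * illustrative.
 */
static void __maybe_unused vcn_v4_0_example_ring_doorbell(struct amdgpu_ring *ring)
{
	*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);		/* shadow first */
	WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));	/* then kick */
}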
1812 
1813 static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p,
1814 				struct amdgpu_job *job)
1815 {
1816 	struct drm_gpu_scheduler **scheds;
1817 
1818 	/* The create msg must be in the first IB submitted */
1819 	if (atomic_read(&job->base.entity->fence_seq))
1820 		return -EINVAL;
1821 
1822 	/* if VCN0 is harvested, we can't support AV1 */
1823 	if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
1824 		return -EINVAL;
1825 
1826 	scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
1827 		[AMDGPU_RING_PRIO_0].sched;
1828 	drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
1829 	return 0;
1830 }
1831 
1832 static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
1833 			    uint64_t addr)
1834 {
1835 	struct ttm_operation_ctx ctx = { false, false };
1836 	struct amdgpu_bo_va_mapping *map;
1837 	uint32_t *msg, num_buffers;
1838 	struct amdgpu_bo *bo;
1839 	uint64_t start, end;
1840 	unsigned int i;
1841 	void *ptr;
1842 	int r;
1843 
1844 	addr &= AMDGPU_GMC_HOLE_MASK;
1845 	r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
1846 	if (r) {
1847 		DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
1848 		return r;
1849 	}
1850 
1851 	start = map->start * AMDGPU_GPU_PAGE_SIZE;
1852 	end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
1853 	if (addr & 0x7) {
1854 		DRM_ERROR("VCN messages must be 8 byte aligned!\n");
1855 		return -EINVAL;
1856 	}
1857 
1858 	bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
1859 	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
1860 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1861 	if (r) {
1862 		DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
1863 		return r;
1864 	}
1865 
1866 	r = amdgpu_bo_kmap(bo, &ptr);
1867 	if (r) {
1868 		DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
1869 		return r;
1870 	}
1871 
1872 	msg = ptr + addr - start;
1873 
1874 	/* Check length */
1875 	if (msg[1] > end - addr) {
1876 		r = -EINVAL;
1877 		goto out;
1878 	}
1879 
1880 	if (msg[3] != RDECODE_MSG_CREATE)
1881 		goto out;
1882 
1883 	num_buffers = msg[2];
1884 	for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
1885 		uint32_t offset, size, *create;
1886 
1887 		if (msg[0] != RDECODE_MESSAGE_CREATE)
1888 			continue;
1889 
1890 		offset = msg[1];
1891 		size = msg[2];
1892 
1893 		if (offset + size > end) {
1894 			r = -EINVAL;
1895 			goto out;
1896 		}
1897 
1898 		create = ptr + addr + offset - start;
1899 
1900 		/* H264, HEVC and VP9 can run on any instance */
1901 		if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
1902 			continue;
1903 
1904 		r = vcn_v4_0_limit_sched(p, job);
1905 		if (r)
1906 			goto out;
1907 	}
1908 
1909 out:
1910 	amdgpu_bo_kunmap(bo);
1911 	return r;
1912 }
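
/*
 * The parser above assumes the following RDECODE message layout (dword
 * indices): msg[1] = message length in bytes, msg[2] = buffer count,
 * msg[3] = message type, with 4-dword buffer records starting at msg[6].
 * The struct below only documents that assumed fixed header; the driver
 * itself indexes the raw dwords.
 */
struct vcn_v4_0_example_rdecode_hdr {
	uint32_t dw0;		/* not inspected by the parser above */
	uint32_t length;	/* msg[1], checked against the BO mapping */
	uint32_t num_buffers;	/* msg[2] */
	uint32_t msg_type;	/* msg[3], e.g. RDECODE_MSG_CREATE */
};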
1913 
1914 #define RADEON_VCN_ENGINE_TYPE_ENCODE			(0x00000002)
1915 #define RADEON_VCN_ENGINE_TYPE_DECODE			(0x00000003)
1916 
1917 #define RADEON_VCN_ENGINE_INFO				(0x30000001)
1918 #define RADEON_VCN_ENGINE_INFO_MAX_OFFSET		16
1919 
1920 #define RENCODE_ENCODE_STANDARD_AV1			2
1921 #define RENCODE_IB_PARAM_SESSION_INIT			0x00000003
1922 #define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET	64
1923 
1924 /* Return the offset in ib if id is found, -1 otherwise.
1925  * To speed up the search we only scan up to max_offset.
1926  */
1927 static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset)
1928 {
1929 	int i;
1930 
1931 	for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) {
1932 		if (ib->ptr[i + 1] == id)
1933 			return i;
1934 	}
1935 	return -1;
1936 }
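
/*
 * The walk above assumes encoder IB packets shaped as
 *   dw0: packet size in bytes (>= 8, dword aligned)
 *   dw1: packet id
 * A hypothetical debug dump using the same traversal:
 */
static void __maybe_unused vcn_v4_0_example_dump_ib_params(struct amdgpu_ib *ib,
							   int max_offset)
{
	int i;

	for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8;
	     i += ib->ptr[i] / 4)
		DRM_DEBUG("ib param at dw %d: size %u, id 0x%08x\n",
			  i, ib->ptr[i], ib->ptr[i + 1]);
}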
1937 
1938 static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
1939 					   struct amdgpu_job *job,
1940 					   struct amdgpu_ib *ib)
1941 {
1942 	struct amdgpu_ring *ring = amdgpu_job_ring(job);
1943 	struct amdgpu_vcn_decode_buffer *decode_buffer;
1944 	uint64_t addr;
1945 	uint32_t val;
1946 	int idx;
1947 
1948 	/* The first instance can decode anything */
1949 	if (!ring->me)
1950 		return 0;
1951 
1952 	/* RADEON_VCN_ENGINE_INFO is at the top of the ib block */
1953 	idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO,
1954 			RADEON_VCN_ENGINE_INFO_MAX_OFFSET);
1955 	if (idx < 0) /* engine info is missing */
1956 		return 0;
1957 
1958 	val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
1959 	if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
1960 		decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];
1961 
1962 		if (!(decode_buffer->valid_buf_flag & 0x1))
1963 			return 0;
1964 
1965 		addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
1966 			decode_buffer->msg_buffer_address_lo;
1967 		return vcn_v4_0_dec_msg(p, job, addr);
1968 	} else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
1969 		idx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT,
1970 			RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET);
1971 		if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1)
1972 			return vcn_v4_0_limit_sched(p, job);
1973 	}
1974 	return 0;
1975 }
1976 
1977 static int vcn_v4_0_ring_reset(struct amdgpu_ring *ring,
1978 			       unsigned int vmid,
1979 			       struct amdgpu_fence *timedout_fence)
1980 {
1981 	struct amdgpu_device *adev = ring->adev;
1982 	struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
1983 	int r;
1984 
1985 	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
1986 	r = vcn_v4_0_stop(vinst);
1987 	if (r)
1988 		return r;
1989 	r = vcn_v4_0_start(vinst);
1990 	if (r)
1991 		return r;
1992 	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
1993 }
1994 
1995 static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
1996 	.type = AMDGPU_RING_TYPE_VCN_ENC,
1997 	.align_mask = 0x3f,
1998 	.nop = VCN_ENC_CMD_NO_OP,
1999 	.extra_dw = sizeof(struct amdgpu_vcn_rb_metadata),
2000 	.get_rptr = vcn_v4_0_unified_ring_get_rptr,
2001 	.get_wptr = vcn_v4_0_unified_ring_get_wptr,
2002 	.set_wptr = vcn_v4_0_unified_ring_set_wptr,
2003 	.patch_cs_in_place = vcn_v4_0_ring_patch_cs_in_place,
2004 	.emit_frame_size =
2005 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
2006 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
2007 		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
2008 		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
2009 		1, /* vcn_v2_0_enc_ring_insert_end */
2010 	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
2011 	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
2012 	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
2013 	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
2014 	.test_ring = amdgpu_vcn_enc_ring_test_ring,
2015 	.test_ib = amdgpu_vcn_unified_ring_test_ib,
2016 	.insert_nop = amdgpu_ring_insert_nop,
2017 	.insert_end = vcn_v2_0_enc_ring_insert_end,
2018 	.pad_ib = amdgpu_ring_generic_pad_ib,
2019 	.begin_use = amdgpu_vcn_ring_begin_use,
2020 	.end_use = amdgpu_vcn_ring_end_use,
2021 	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
2022 	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
2023 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2024 	.reset = vcn_v4_0_ring_reset,
2025 };
2026 
2027 /**
2028  * vcn_v4_0_set_unified_ring_funcs - set unified ring functions
2029  *
2030  * @adev: amdgpu_device pointer
2031  *
2032  * Set unified ring functions
2033  */
2034 static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev)
2035 {
2036 	int i;
2037 
2038 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2039 		if (adev->vcn.harvest_config & (1 << i))
2040 			continue;
2041 
2042 		if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 2))
2043 			vcn_v4_0_unified_ring_vm_funcs.secure_submission_supported = true;
2044 
2045 		adev->vcn.inst[i].ring_enc[0].funcs =
2046 		       (const struct amdgpu_ring_funcs *)&vcn_v4_0_unified_ring_vm_funcs;
2047 		adev->vcn.inst[i].ring_enc[0].me = i;
2048 	}
2049 }
2050 
2051 /**
2052  * vcn_v4_0_is_idle - check whether the VCN block is idle
2053  *
2054  * @ip_block: Pointer to the amdgpu_ip_block structure
2055  *
2056  * Check whether VCN block is idle
2057  */
2058 static bool vcn_v4_0_is_idle(struct amdgpu_ip_block *ip_block)
2059 {
2060 	struct amdgpu_device *adev = ip_block->adev;
2061 	int i, ret = 1;
2062 
2063 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2064 		if (adev->vcn.harvest_config & (1 << i))
2065 			continue;
2066 
2067 		ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE);
2068 	}
2069 
2070 	return ret;
2071 }
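
/*
 * Equivalent early-exit form of the AND-fold in vcn_v4_0_is_idle(), shown
 * for clarity only; the helper name is an assumption.
 */
static bool __maybe_unused vcn_v4_0_example_all_idle(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
			return false;	/* first busy instance decides */
	}

	return true;
}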
2072 
2073 /**
2074  * vcn_v4_0_wait_for_idle - wait for VCN block idle
2075  *
2076  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
2077  *
2078  * Wait for VCN block idle
2079  */
2080 static int vcn_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
2081 {
2082 	struct amdgpu_device *adev = ip_block->adev;
2083 	int i, ret = 0;
2084 
2085 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2086 		if (adev->vcn.harvest_config & (1 << i))
2087 			continue;
2088 
2089 		ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE,
2090 			UVD_STATUS__IDLE);
2091 		if (ret)
2092 			return ret;
2093 	}
2094 
2095 	return ret;
2096 }
2097 
2098 /**
2099  * vcn_v4_0_set_clockgating_state - set VCN block clockgating state
2100  *
2101  * @ip_block: amdgpu_ip_block pointer
2102  * @state: clock gating state
2103  *
2104  * Set VCN block clockgating state
2105  */
2106 static int vcn_v4_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
2107 					  enum amd_clockgating_state state)
2108 {
2109 	struct amdgpu_device *adev = ip_block->adev;
2110 	bool enable = state == AMD_CG_STATE_GATE;
2111 	int i;
2112 
2113 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2114 		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
2115 
2116 		if (adev->vcn.harvest_config & (1 << i))
2117 			continue;
2118 
2119 		if (enable) {
2120 			if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
2121 				return -EBUSY;
2122 			vcn_v4_0_enable_clock_gating(vinst);
2123 		} else {
2124 			vcn_v4_0_disable_clock_gating(vinst);
2125 		}
2126 	}
2127 
2128 	return 0;
2129 }
2130 
2131 static int vcn_v4_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
2132 				 enum amd_powergating_state state)
2133 {
2134 	struct amdgpu_device *adev = vinst->adev;
2135 	int ret = 0;
2136 
2137 	/* For SRIOV, the guest should not control VCN power-gating;
2138 	 * MMSCH FW controls both power-gating and clock-gating, so the
2139 	 * guest must avoid touching CGC and PG.
2140 	 */
2141 	if (amdgpu_sriov_vf(adev)) {
2142 		vinst->cur_state = AMD_PG_STATE_UNGATE;
2143 		return 0;
2144 	}
2145 
2146 	if (state == vinst->cur_state)
2147 		return 0;
2148 
2149 	if (state == AMD_PG_STATE_GATE)
2150 		ret = vcn_v4_0_stop(vinst);
2151 	else
2152 		ret = vcn_v4_0_start(vinst);
2153 
2154 	if (!ret)
2155 		vinst->cur_state = state;
2156 
2157 	return ret;
2158 }
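
/*
 * Usage sketch: gating maps onto vcn_v4_0_stop(), ungating onto
 * vcn_v4_0_start(), and cur_state only advances on success. The wrapper
 * below is hypothetical.
 */
static int __maybe_unused vcn_v4_0_example_gate(struct amdgpu_vcn_inst *vinst)
{
	return vcn_v4_0_set_pg_state(vinst, AMD_PG_STATE_GATE);
}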
2159 
2160 /**
2161  * vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state
2162  *
2163  * @adev: amdgpu_device pointer
2164  * @source: interrupt sources
2165  * @type: interrupt types
2166  * @state: interrupt states
2167  *
2168  * Set VCN block RAS interrupt state
2169  */
2170 static int vcn_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
2171 	struct amdgpu_irq_src *source,
2172 	unsigned int type,
2173 	enum amdgpu_interrupt_state state)
2174 {
2175 	return 0;
2176 }
2177 
2178 /**
2179  * vcn_v4_0_process_interrupt - process VCN block interrupt
2180  *
2181  * @adev: amdgpu_device pointer
2182  * @source: interrupt sources
2183  * @entry: interrupt entry from clients and sources
2184  *
2185  * Process VCN block interrupt
2186  */
2187 static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
2188       struct amdgpu_iv_entry *entry)
2189 {
2190 	uint32_t ip_instance;
2191 
2192 	if (amdgpu_sriov_is_vcn_rb_decouple(adev)) {
2193 		ip_instance = entry->ring_id;
2194 	} else {
2195 		switch (entry->client_id) {
2196 		case SOC15_IH_CLIENTID_VCN:
2197 			ip_instance = 0;
2198 			break;
2199 		case SOC15_IH_CLIENTID_VCN1:
2200 			ip_instance = 1;
2201 			break;
2202 		default:
2203 			DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
2204 			return 0;
2205 		}
2206 	}
2207 
2208 	DRM_DEBUG("IH: VCN TRAP\n");
2209 
2210 	switch (entry->src_id) {
2211 	case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
2212 		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
2213 		break;
2214 	default:
2215 		DRM_ERROR("Unhandled interrupt: %d %d\n",
2216 			  entry->src_id, entry->src_data[0]);
2217 		break;
2218 	}
2219 
2220 	return 0;
2221 }
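
/*
 * The client-id dispatch above, expressed as a small helper for clarity;
 * the switch values are the ones handled above, the function itself is
 * illustrative only.
 */
static int __maybe_unused vcn_v4_0_example_client_to_inst(unsigned int client_id)
{
	switch (client_id) {
	case SOC15_IH_CLIENTID_VCN:
		return 0;
	case SOC15_IH_CLIENTID_VCN1:
		return 1;
	default:
		return -EINVAL;	/* unhandled client */
	}
}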
2222 
2223 static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = {
2224 	.process = vcn_v4_0_process_interrupt,
2225 };
2226 
2227 static const struct amdgpu_irq_src_funcs vcn_v4_0_ras_irq_funcs = {
2228 	.set = vcn_v4_0_set_ras_interrupt_state,
2229 	.process = amdgpu_vcn_process_poison_irq,
2230 };
2231 
2232 /**
2233  * vcn_v4_0_set_irq_funcs - set VCN block interrupt irq functions
2234  *
2235  * @adev: amdgpu_device pointer
2236  *
2237  * Set VCN block interrupt irq functions
2238  */
2239 static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
2240 {
2241 	int i;
2242 
2243 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2244 		if (adev->vcn.harvest_config & (1 << i))
2245 			continue;
2246 
2247 		adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
2248 		adev->vcn.inst[i].irq.funcs = &vcn_v4_0_irq_funcs;
2249 
2250 		adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
2251 		adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v4_0_ras_irq_funcs;
2252 	}
2253 }
2254 
2255 static void vcn_v4_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
2256 {
2257 	struct amdgpu_device *adev = ip_block->adev;
2258 	int i, j;
2259 	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
2260 	uint32_t inst_off, is_powered;
2261 
2262 	if (!adev->vcn.ip_dump)
2263 		return;
2264 
2265 	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
2266 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
2267 		if (adev->vcn.harvest_config & (1 << i)) {
2268 			drm_printf(p, "\nHarvested Instance:VCN%d, skipping dump\n", i);
2269 			continue;
2270 		}
2271 
2272 		inst_off = i * reg_count;
2273 		is_powered = (adev->vcn.ip_dump[inst_off] &
2274 				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
2275 
2276 		if (is_powered) {
2277 			drm_printf(p, "\nActive Instance:VCN%d\n", i);
2278 			for (j = 0; j < reg_count; j++)
2279 				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0[j].reg_name,
2280 					   adev->vcn.ip_dump[inst_off + j]);
2281 		} else {
2282 			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
2283 		}
2284 	}
2285 }
2286 
2287 static void vcn_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
2288 {
2289 	struct amdgpu_device *adev = ip_block->adev;
2290 	int i, j;
2291 	bool is_powered;
2292 	uint32_t inst_off;
2293 	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
2294 
2295 	if (!adev->vcn.ip_dump)
2296 		return;
2297 
2298 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
2299 		if (adev->vcn.harvest_config & (1 << i))
2300 			continue;
2301 
2302 		inst_off = i * reg_count;
2303 		/* regUVD_POWER_STATUS is always readable and is the first element of the array */
2304 		adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
2305 		is_powered = (adev->vcn.ip_dump[inst_off] &
2306 				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
2307 
2308 		if (is_powered)
2309 			for (j = 1; j < reg_count; j++)
2310 				adev->vcn.ip_dump[inst_off + j] =
2311 					RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0[j],
2312 									   i));
2313 	}
2314 }
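
/*
 * Layout sketch for the ip_dump buffer shared by the two functions above:
 * one contiguous block of reg_count dwords per instance, so instance i's
 * register j lives at i * reg_count + j. The accessor is illustrative
 * only.
 */
static uint32_t __maybe_unused vcn_v4_0_example_dump_read(struct amdgpu_device *adev,
							  int inst, int reg)
{
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);

	return adev->vcn.ip_dump[inst * reg_count + reg];
}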
2315 
2316 static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
2317 	.name = "vcn_v4_0",
2318 	.early_init = vcn_v4_0_early_init,
2319 	.sw_init = vcn_v4_0_sw_init,
2320 	.sw_fini = vcn_v4_0_sw_fini,
2321 	.hw_init = vcn_v4_0_hw_init,
2322 	.hw_fini = vcn_v4_0_hw_fini,
2323 	.suspend = vcn_v4_0_suspend,
2324 	.resume = vcn_v4_0_resume,
2325 	.is_idle = vcn_v4_0_is_idle,
2326 	.wait_for_idle = vcn_v4_0_wait_for_idle,
2327 	.set_clockgating_state = vcn_v4_0_set_clockgating_state,
2328 	.set_powergating_state = vcn_set_powergating_state,
2329 	.dump_ip_state = vcn_v4_0_dump_ip_state,
2330 	.print_ip_state = vcn_v4_0_print_ip_state,
2331 };
2332 
2333 const struct amdgpu_ip_block_version vcn_v4_0_ip_block = {
2334 	.type = AMD_IP_BLOCK_TYPE_VCN,
2335 	.major = 4,
2336 	.minor = 0,
2337 	.rev = 0,
2338 	.funcs = &vcn_v4_0_ip_funcs,
2339 };
2340 
2341 static uint32_t vcn_v4_0_query_poison_by_instance(struct amdgpu_device *adev,
2342 			uint32_t instance, uint32_t sub_block)
2343 {
2344 	uint32_t poison_stat = 0, reg_value = 0;
2345 
2346 	switch (sub_block) {
2347 	case AMDGPU_VCN_V4_0_VCPU_VCODEC:
2348 		reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
2349 		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
2350 		break;
2351 	default:
2352 		break;
2353 	}
2354 
2355 	if (poison_stat)
2356 		dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
2357 			instance, sub_block);
2358 
2359 	return poison_stat;
2360 }
2361 
2362 static bool vcn_v4_0_query_ras_poison_status(struct amdgpu_device *adev)
2363 {
2364 	uint32_t inst, sub;
2365 	uint32_t poison_stat = 0;
2366 
2367 	for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
2368 		for (sub = 0; sub < AMDGPU_VCN_V4_0_MAX_SUB_BLOCK; sub++)
2369 			poison_stat +=
2370 				vcn_v4_0_query_poison_by_instance(adev, inst, sub);
2371 
2372 	return !!poison_stat;
2373 }
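
/*
 * Sketch: the query above ORs per-instance, per-sub-block results; a
 * single-instance probe reduces to the hypothetical helper below.
 */
static bool __maybe_unused vcn_v4_0_example_inst_poisoned(struct amdgpu_device *adev,
							  uint32_t inst)
{
	uint32_t sub, stat = 0;

	for (sub = 0; sub < AMDGPU_VCN_V4_0_MAX_SUB_BLOCK; sub++)
		stat += vcn_v4_0_query_poison_by_instance(adev, inst, sub);

	return !!stat;
}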
2374 
2375 const struct amdgpu_ras_block_hw_ops vcn_v4_0_ras_hw_ops = {
2376 	.query_poison_status = vcn_v4_0_query_ras_poison_status,
2377 };
2378 
2379 static struct amdgpu_vcn_ras vcn_v4_0_ras = {
2380 	.ras_block = {
2381 		.hw_ops = &vcn_v4_0_ras_hw_ops,
2382 		.ras_late_init = amdgpu_vcn_ras_late_init,
2383 	},
2384 };
2385 
2386 static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev)
2387 {
2388 	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
2389 	case IP_VERSION(4, 0, 0):
2390 		adev->vcn.ras = &vcn_v4_0_ras;
2391 		break;
2392 	default:
2393 		break;
2394 	}
2395 }
2396