/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
#include "vcn_v5_0_0.h"

#include <drm/drm_drv.h>

static const struct amdgpu_hwip_reg_entry vcn_reg_list_5_0[] = {
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
};

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v5_0_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
				   enum amd_powergating_state state);
static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				     struct dpg_pause_state *new_state);
static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * vcn_v5_0_0_early_init - set function pointers and load microcode
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
		/* re-use enc ring as unified ring */
		adev->vcn.inst[i].num_enc_rings = 1;

	vcn_v5_0_0_set_unified_ring_funcs(adev);
	vcn_v5_0_0_set_irq_funcs(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		adev->vcn.inst[i].set_pg_state = vcn_v5_0_0_set_pg_state;

		r = amdgpu_vcn_early_init(adev, i);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v5_0_0_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = ip_block->adev;
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		struct amdgpu_vcn5_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_vcn_sw_init(adev, i);
		if (r)
			return r;

		amdgpu_vcn_setup_ucode(adev, i);

		r = amdgpu_vcn_resume(adev, i);
		if (r)
			return r;

		atomic_set(&adev->vcn.inst[i].sched_score, 0);

		/* VCN UNIFIED TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		/* VCN POISON TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_5_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->use_doorbell = true;
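		/*
		 * Doorbell layout assumption (mirrors earlier VCN
		 * generations): vcn_ring0_1 is in 64-bit doorbell units, so
		 * shift left by one for a 32-bit doorbell slot, skip two
		 * reserved slots, and space instances eight slots apart.
		 */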
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;

		ring->vm_hub = AMDGPU_MMHUB0(0);
		sprintf(ring->name, "vcn_unified_%d", i);

		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
						AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

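		/* Advertise the unified-queue programming model to the VCN
		 * firmware through the shared memory region.
		 */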
		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
		fw_shared->sq.is_enabled = 1;

		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);
		fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
			AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
			adev->vcn.inst[i].pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
	}

	adev->vcn.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
	if (!amdgpu_sriov_vf(adev))
		adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;

	r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_5_0, ARRAY_SIZE(vcn_reg_list_5_0));
	if (r)
		return r;

	r = amdgpu_vcn_sysfs_reset_mask_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * vcn_v5_0_0_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r, idx;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			struct amdgpu_vcn5_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = 0;
		}

		drm_dev_exit(idx);
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(adev, i);
		if (r)
			return r;
	}

	amdgpu_vcn_sysfs_reset_mask_fini(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		amdgpu_vcn_sw_fini(adev, i);

	return 0;
}

/**
 * vcn_v5_0_0_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v5_0_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ring = &adev->vcn.inst[i].ring_enc[0];

		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
			((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v5_0_0_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block and mark the ring as no longer ready
 */
static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		cancel_delayed_work_sync(&vinst->idle_work);

		if (!amdgpu_sriov_vf(adev)) {
			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
			    (vinst->cur_state != AMD_PG_STATE_GATE &&
			     RREG32_SOC15(VCN, i, regUVD_STATUS))) {
				vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
			}
		}
	}

	return 0;
}

/**
 * vcn_v5_0_0_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	r = vcn_v5_0_0_hw_fini(ip_block);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(ip_block->adev, i);
		if (r)
			return r;
	}

	return r;
}

/**
 * vcn_v5_0_0_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v5_0_0_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_resume(ip_block->adev, i);
		if (r)
			return r;
	}

	r = vcn_v5_0_0_hw_init(ip_block);

	return r;
}

/**
 * vcn_v5_0_0_mc_resume - memory controller programming
 *
 * @vinst: VCN instance
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v5_0_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
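	/* The extra 8 bytes past the ucode image are assumed to cover
	 * trailing firmware metadata, matching older VCN/UVD code.
	 */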
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)));
}

/**
 * vcn_v5_0_0_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @vinst: VCN instance
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets in dpg mode
 */
static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
					  bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
		adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v5_0_0_disable_static_power_gating - disable VCN static power gating
 *
 * @vinst: VCN instance
 *
 * Disable static power gating for VCN block
 */
static void vcn_v5_0_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
	} else {
		data = 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);

		data = 1 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);

		data = 1 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);

		data = 1 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
	}

	data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
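	/* 0x103 is assumed to cover the UVD_POWER_STATUS status field
	 * (bits 0-1) plus the UVD_PG_EN bit (bit 8), cleared here before
	 * the new power-gating configuration is written back.
	 */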
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);
}

/**
 * vcn_v5_0_0_enable_static_power_gating - enable VCN static power gating
 *
 * @vinst: VCN instance
 *
 * Enable static power gating for VCN block
 */
static void vcn_v5_0_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				1 << UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS__SHIFT,
				UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);
	}
}

/**
 * vcn_v5_0_0_disable_clock_gating - disable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Disable clock gating for VCN block
 */
static void vcn_v5_0_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	return;
}

#if 0
/**
 * vcn_v5_0_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
 *
 * @vinst: VCN instance
 * @sram_sel: sram select
 * @indirect: indirectly write sram
 *
 * Disable clock gating for VCN block with dpg mode
 */
static void vcn_v5_0_0_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
						     uint8_t sram_sel,
						     uint8_t indirect)
{
	return;
}
#endif

/**
 * vcn_v5_0_0_enable_clock_gating - enable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Enable clock gating for VCN block
 */
static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	return;
}

/**
 * vcn_v5_0_0_start_dpg_mode - VCN start with dpg mode
 *
 * @vinst: VCN instance
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
				     bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int ret;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp);

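	/* For indirect (PSP-assisted) load, the DPG register writes below
	 * are staged in the scratch SRAM buffer and handed to the PSP in
	 * one batch via amdgpu_vcn_psp_update_sram() further down.
	 */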
	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN), 0, 0, indirect);

	/* setup regUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect);

	vcn_v5_0_0_mc_resume_dpg_mode(vinst, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable LMI MC and UMC channels */
	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect) {
		ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
		if (ret) {
			dev_err(adev->dev, "%s: vcn sram load failed %d\n", __func__, ret);
			return ret;
		}
	}

	ring = &adev->vcn.inst[inst_idx].ring_enc[0];

	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / 4);

	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0);

	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, tmp);
	ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	WREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL,
		ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
		VCN_RB1_DB_CTRL__EN_MASK);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);

	return 0;
}

/**
 * vcn_v5_0_0_start - VCN start
 *
 * @vinst: VCN instance
 *
 * Start VCN block
 */
static int vcn_v5_0_0_start(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	struct amdgpu_vcn5_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int j, k, r;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, true, i);

	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return vcn_v5_0_0_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);

	/* disable VCN power gating */
	vcn_v5_0_0_disable_static_power_gating(vinst);

	/* set VCN status busy */
	tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);

	/* enable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
		 UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* disable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* enable LMI MC and UMC channels */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

	/* setup regUVD_LMI_CTRL */
	tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
	WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
		     UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

	vcn_v5_0_0_mc_resume(vinst);

	/* VCN global tiling registers */
	WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);

	/* unblock VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	/* release VCPU reset to boot */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
		 ~UVD_VCPU_CNTL__BLK_RST_MASK);

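	/* Poll for firmware boot: bit 1 of UVD_STATUS (part of the
	 * VCPU_REPORT field) is assumed to signal a running VCPU. Make up
	 * to ten attempts, toggling VCPU block reset between attempts.
	 */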
	for (j = 0; j < 10; ++j) {
		uint32_t status;

		for (k = 0; k < 100; ++k) {
			status = RREG32_SOC15(VCN, i, regUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
			if (amdgpu_emu_mode == 1)
				msleep(1);
		}

		if (amdgpu_emu_mode == 1) {
			r = -1;
			if (status & 2) {
				r = 0;
				break;
			}
		} else {
			r = 0;
			if (status & 2)
				break;

			dev_err(adev->dev,
				"VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
				 UVD_VCPU_CNTL__BLK_RST_MASK,
				 ~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
				 ~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}
	}

	if (r) {
		dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
		 UVD_MASTINT_EN__VCPU_EN_MASK,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* clear the busy bit of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
		 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	ring = &adev->vcn.inst[i].ring_enc[0];
	WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
		     ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
		     VCN_RB1_DB_CTRL__EN_MASK);

	WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);

	tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
	WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
	WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);

	tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
	WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
	ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
	WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, i, regUVD_STATUS);

	return 0;
}

/**
 * vcn_v5_0_0_stop_dpg_mode - VCN stop with dpg mode
 *
 * @vinst: VCN instance
 *
 * Stop VCN block with dpg mode
 */
static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
	uint32_t tmp;

	vcn_v5_0_0_pause_dpg_mode(vinst, &state);

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
}

/**
 * vcn_v5_0_0_stop - VCN stop
 *
 * @vinst: VCN instance
 *
 * Stop VCN block
 */
static int vcn_v5_0_0_stop(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	struct amdgpu_vcn5_fw_shared *fw_shared;
	uint32_t tmp;
	int r = 0;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
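	/* Keep firmware from toggling the DPG power state while the block
	 * is being torn down (semantics inferred from the flag name).
	 */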
	fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		vcn_v5_0_0_stop_dpg_mode(vinst);
		r = 0;
		goto done;
	}

	/* wait for vcn idle */
	r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
	if (r)
		goto done;

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
	if (r)
		goto done;

	/* disable LMI UMC channel */
	tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
	if (r)
		goto done;

	/* block VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
		 UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	/* reset VCPU */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
		 UVD_VCPU_CNTL__BLK_RST_MASK,
		 ~UVD_VCPU_CNTL__BLK_RST_MASK);

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
		 ~(UVD_VCPU_CNTL__CLK_EN_MASK));

	/* apply soft reset */
	tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
	tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
	tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
	tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

	/* clear status */
	WREG32_SOC15(VCN, i, regUVD_STATUS, 0);

	/* enable VCN power gating */
	vcn_v5_0_0_enable_static_power_gating(vinst);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, i, regUVD_STATUS);

done:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, false, i);

	return r;
}

/**
 * vcn_v5_0_0_pause_dpg_mode - VCN pause with dpg mode
 *
 * @vinst: VCN instance
 * @new_state: pause state
 *
 * Pause dpg mode for VCN block
 */
static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				     struct dpg_pause_state *new_state)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 0x1,
					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

/**
 * vcn_v5_0_0_unified_ring_get_rptr - get unified read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified read pointer
 */
static uint64_t vcn_v5_0_0_unified_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR);
}

/**
 * vcn_v5_0_0_unified_ring_get_wptr - get unified write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified write pointer
 */
static uint64_t vcn_v5_0_0_unified_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR);
}

/**
 * vcn_v5_0_0_unified_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static int vcn_v5_0_0_ring_reset(struct amdgpu_ring *ring,
				 unsigned int vmid,
				 struct amdgpu_fence *timedout_fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
	int r;

	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
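	/* Per-queue reset is implemented as a full stop/start of this
	 * VCN instance.
	 */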
	r = vcn_v5_0_0_stop(vinst);
	if (r)
		return r;
	r = vcn_v5_0_0_start(vinst);
	if (r)
		return r;
	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}

static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v5_0_0_unified_ring_get_rptr,
	.get_wptr = vcn_v5_0_0_unified_ring_get_wptr,
	.set_wptr = vcn_v5_0_0_unified_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_unified_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
	.reset = vcn_v5_0_0_ring_reset,
};

/**
 * vcn_v5_0_0_set_unified_ring_funcs - set unified ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set unified ring functions
 */
static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_0_unified_ring_vm_funcs;
		adev->vcn.inst[i].ring_enc[0].me = i;
	}
}

/**
 * vcn_v5_0_0_is_idle - check whether the VCN block is idle
 *
 * @ip_block: Pointer to the amdgpu_ip_block structure
 *
 * Check whether the VCN block is idle
 */
static bool vcn_v5_0_0_is_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 1;

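	/* AND the idle status across all non-harvested instances. */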
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

/**
 * vcn_v5_0_0_wait_for_idle - wait for VCN block idle
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Wait for VCN block idle
 */
static int vcn_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * vcn_v5_0_0_set_clockgating_state - set VCN block clockgating state
 *
 * @ip_block: amdgpu_ip_block pointer
 * @state: clock gating state
 *
 * Set VCN block clockgating state
 */
static int vcn_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					    enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = state == AMD_CG_STATE_GATE;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (enable) {
			if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v5_0_0_enable_clock_gating(vinst);
		} else {
			vcn_v5_0_0_disable_clock_gating(vinst);
		}
	}

	return 0;
}

static int vcn_v5_0_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
				   enum amd_powergating_state state)
{
	int ret = 0;

	if (state == vinst->cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v5_0_0_stop(vinst);
	else
		ret = vcn_v5_0_0_start(vinst);

	if (!ret)
		vinst->cur_state = state;

	return ret;
}

/**
 * vcn_v5_0_0_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @entry: interrupt entry from clients and sources
 *
 * Process VCN block interrupt
 */
static int vcn_v5_0_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
	struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_5_0__SRCID_UVD_POISON:
		amdgpu_vcn_process_poison_irq(adev, source, entry);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v5_0_0_irq_funcs = {
	.process = vcn_v5_0_0_process_interrupt,
};

/**
 * vcn_v5_0_0_set_irq_funcs - set VCN block interrupt irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set VCN block interrupt irq functions
 */
static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

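		/* One interrupt type per unified ring plus one for the
		 * poison event registered in sw_init.
		 */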
		adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v5_0_0_irq_funcs;
	}
}

static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
	.name = "vcn_v5_0_0",
	.early_init = vcn_v5_0_0_early_init,
	.sw_init = vcn_v5_0_0_sw_init,
	.sw_fini = vcn_v5_0_0_sw_fini,
	.hw_init = vcn_v5_0_0_hw_init,
	.hw_fini = vcn_v5_0_0_hw_fini,
	.suspend = vcn_v5_0_0_suspend,
	.resume = vcn_v5_0_0_resume,
	.is_idle = vcn_v5_0_0_is_idle,
	.wait_for_idle = vcn_v5_0_0_wait_for_idle,
	.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
	.set_powergating_state = vcn_set_powergating_state,
	.dump_ip_state = amdgpu_vcn_dump_ip_state,
	.print_ip_state = amdgpu_vcn_print_ip_state,
};

const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v5_0_0_ip_funcs,
};