xref: /linux/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_2.c (revision 4a57e0913e8c7fff407e97909f4ae48caa84d612)
1 /*
2  * Copyright 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 #include "amdgpu.h"
26 #include "amdgpu_vcn.h"
27 #include "amdgpu_pm.h"
28 #include "soc15.h"
29 #include "soc15d.h"
30 #include "soc15_hw_ip.h"
31 #include "vcn_v2_0.h"
32 #include "vcn_v4_0_3.h"
33 
34 #include "vcn/vcn_5_0_0_offset.h"
35 #include "vcn/vcn_5_0_0_sh_mask.h"
36 #include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
37 #include "vcn_v5_0_0.h"
38 #include "vcn_v5_0_1.h"
39 #include "vcn_v5_0_2.h"
40 
41 #include <drm/drm_drv.h>
42 
43 static void vcn_v5_0_2_set_unified_ring_funcs(struct amdgpu_device *adev);
44 static void vcn_v5_0_2_set_irq_funcs(struct amdgpu_device *adev);
45 static int vcn_v5_0_2_set_pg_state(struct amdgpu_vcn_inst *vinst,
46 				   enum amd_powergating_state state);
47 static void vcn_v5_0_2_unified_ring_set_wptr(struct amdgpu_ring *ring);
48 
49 /**
50  * vcn_v5_0_2_early_init - set function pointers and load microcode
51  *
52  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
53  *
54  * Set ring and irq function pointers
55  * Load microcode from filesystem
56  */
57 static int vcn_v5_0_2_early_init(struct amdgpu_ip_block *ip_block)
58 {
59 	struct amdgpu_device *adev = ip_block->adev;
60 	int i, r;
61 
62 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
63 		/* re-use enc ring as unified ring */
64 		adev->vcn.inst[i].num_enc_rings = 1;
65 
66 	vcn_v5_0_2_set_unified_ring_funcs(adev);
67 	vcn_v5_0_2_set_irq_funcs(adev);
68 
69 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
70 		adev->vcn.inst[i].set_pg_state = vcn_v5_0_2_set_pg_state;
71 
72 		r = amdgpu_vcn_early_init(adev, i);
73 		if (r)
74 			return r;
75 	}
76 
77 	return 0;
78 }
79 
80 static void vcn_v5_0_2_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
81 {
82 	struct amdgpu_vcn5_fw_shared *fw_shared;
83 
84 	fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
85 
86 	if (fw_shared->sq.is_enabled)
87 		return;
88 	fw_shared->present_flag_0 =
89 		cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
90 	fw_shared->sq.is_enabled = 1;
91 
92 	if (amdgpu_vcnfw_log)
93 		amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);
94 }
95 
96 /**
97  * vcn_v5_0_2_sw_init - sw init for VCN block
98  *
99  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
100  *
101  * Load firmware and sw initialization
102  */
103 static int vcn_v5_0_2_sw_init(struct amdgpu_ip_block *ip_block)
104 {
105 	struct amdgpu_device *adev = ip_block->adev;
106 	struct amdgpu_ring *ring;
107 	int i, r, vcn_inst;
108 
109 	/* VCN UNIFIED TRAP */
110 	r = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_VCN,
111 		VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
112 	if (r)
113 		return r;
114 
115 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
116 		vcn_inst = GET_INST(VCN, i);
117 
118 		r = amdgpu_vcn_sw_init(adev, i);
119 		if (r)
120 			return r;
121 
122 		amdgpu_vcn_setup_ucode(adev, i);
123 
124 		r = amdgpu_vcn_resume(adev, i);
125 		if (r)
126 			return r;
127 
128 		ring = &adev->vcn.inst[i].ring_enc[0];
129 		ring->use_doorbell = true;
130 
131 		ring->doorbell_index =
132 			(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 32 * vcn_inst;
133 
134 		ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
135 		sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
136 
137 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
138 					AMDGPU_RING_PRIO_DEFAULT, &adev->vcn.inst[i].sched_score);
139 		if (r)
140 			return r;
141 
142 		vcn_v5_0_2_fw_shared_init(adev, i);
143 	}
144 
145 	/* TODO: Add queue reset mask when FW fully supports it */
146 	adev->vcn.supported_reset =
147 		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
148 
149 	return amdgpu_vcn_sysfs_reset_mask_init(adev);
150 }
151 
152 /**
153  * vcn_v5_0_2_sw_fini - sw fini for VCN block
154  *
155  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
156  *
157  * VCN suspend and free up sw allocation
158  */
159 static int vcn_v5_0_2_sw_fini(struct amdgpu_ip_block *ip_block)
160 {
161 	struct amdgpu_device *adev = ip_block->adev;
162 	int i, r, idx;
163 
164 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
165 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
166 			struct amdgpu_vcn5_fw_shared *fw_shared;
167 
168 			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
169 			fw_shared->present_flag_0 = 0;
170 			fw_shared->sq.is_enabled = 0;
171 		}
172 
173 		drm_dev_exit(idx);
174 	}
175 
176 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
177 		r = amdgpu_vcn_suspend(adev, i);
178 		if (r)
179 			return r;
180 	}
181 
182 	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
183 		amdgpu_vcn_sw_fini(adev, i);
184 
185 	amdgpu_vcn_sysfs_reset_mask_fini(adev);
186 
187 	kfree(adev->vcn.ip_dump); //TODO check
188 
189 	return 0;
190 }
191 
192 /**
193  * vcn_v5_0_2_hw_init - start and test VCN block
194  *
195  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
196  *
197  * Initialize the hardware, boot up the VCPU and do some testing
198  */
199 static int vcn_v5_0_2_hw_init(struct amdgpu_ip_block *ip_block)
200 {
201 	struct amdgpu_device *adev = ip_block->adev;
202 	struct amdgpu_ring *ring;
203 	int i, r, vcn_inst;
204 	uint32_t tmp;
205 
206 	if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x200)
207 		adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
208 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
209 		vcn_inst = GET_INST(VCN, i);
210 		ring = &adev->vcn.inst[i].ring_enc[0];
211 
212 		/* Remove Video Tiles antihang mechanism */
213 		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS);
214 		tmp &= (~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
215 		WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp);
216 
217 		if (ring->use_doorbell)
218 			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
219 				((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
220 				 11 * vcn_inst),
221 				adev->vcn.inst[i].aid_id);
222 
223 		/* Re-init fw_shared, if required */
224 		vcn_v5_0_2_fw_shared_init(adev, i);
225 
226 		r = amdgpu_ring_test_helper(ring);
227 		if (r)
228 			return r;
229 	}
230 
231 	return 0;
232 }
233 
234 /**
235  * vcn_v5_0_2_hw_fini - stop the hardware block
236  *
237  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
238  *
239  * Stop the VCN block, mark ring as not ready any more
240  */
241 static int vcn_v5_0_2_hw_fini(struct amdgpu_ip_block *ip_block)
242 {
243 	struct amdgpu_device *adev = ip_block->adev;
244 	int i;
245 
246 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
247 		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
248 
249 		cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work);
250 		if (vinst->cur_state != AMD_PG_STATE_GATE)
251 			vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
252 	}
253 
254 	return 0;
255 }
256 
257 /**
258  * vcn_v5_0_2_suspend - suspend VCN block
259  *
260  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
261  *
262  * HW fini and suspend VCN block
263  */
264 static int vcn_v5_0_2_suspend(struct amdgpu_ip_block *ip_block)
265 {
266 	struct amdgpu_device *adev = ip_block->adev;
267 	int r, i;
268 
269 	r = vcn_v5_0_2_hw_fini(ip_block);
270 	if (r)
271 		return r;
272 
273 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
274 		r = amdgpu_vcn_suspend(ip_block->adev, i);
275 		if (r)
276 			return r;
277 	}
278 
279 	return r;
280 }
281 
282 /**
283  * vcn_v5_0_2_resume - resume VCN block
284  *
285  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
286  *
287  * Resume firmware and hw init VCN block
288  */
289 static int vcn_v5_0_2_resume(struct amdgpu_ip_block *ip_block)
290 {
291 	struct amdgpu_device *adev = ip_block->adev;
292 	int r, i;
293 
294 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
295 		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
296 
297 		if (amdgpu_in_reset(adev))
298 			vinst->cur_state = AMD_PG_STATE_GATE;
299 
300 		r = amdgpu_vcn_resume(ip_block->adev, i);
301 		if (r)
302 			return r;
303 	}
304 
305 	r = vcn_v5_0_2_hw_init(ip_block);
306 
307 	return r;
308 }
309 
310 /**
311  * vcn_v5_0_2_mc_resume - memory controller programming
312  *
313  * @vinst: VCN instance
314  *
315  * Let the VCN memory controller know it's offsets
316  */
317 static void vcn_v5_0_2_mc_resume(struct amdgpu_vcn_inst *vinst)
318 {
319 	struct amdgpu_device *adev = vinst->adev;
320 	int inst = vinst->inst;
321 	uint32_t offset, size, vcn_inst;
322 	const struct common_firmware_header *hdr;
323 
324 	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
325 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
326 
327 	vcn_inst = GET_INST(VCN, inst);
328 	/* cache window 0: fw */
329 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
330 		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
331 			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
332 		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
333 			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
334 		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0);
335 		offset = 0;
336 	} else {
337 		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
338 			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
339 		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
340 			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
341 		offset = size;
342 		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0,
343 				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
344 	}
345 	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size);
346 
347 	/* cache window 1: stack */
348 	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
349 		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
350 	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
351 		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
352 	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0);
353 	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
354 
355 	/* cache window 2: context */
356 	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
357 		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
358 	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
359 		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
360 	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0);
361 	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
362 
363 	/* non-cache window */
364 	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
365 		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
366 	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
367 		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
368 	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
369 	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0,
370 		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)));
371 }
372 
373 /**
374  * vcn_v5_0_2_mc_resume_dpg_mode - memory controller programming for dpg mode
375  *
376  * @vinst: VCN instance
377  * @indirect: indirectly write sram
378  *
379  * Let the VCN memory controller know it's offsets with dpg mode
380  */
381 static void vcn_v5_0_2_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
382 					  bool indirect)
383 {
384 	struct amdgpu_device *adev = vinst->adev;
385 	int inst_idx = vinst->inst;
386 	uint32_t offset, size;
387 	const struct common_firmware_header *hdr;
388 
389 	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
390 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
391 
392 	/* cache window 0: fw */
393 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
394 		if (!indirect) {
395 			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
396 				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
397 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
398 				 inst_idx].tmr_mc_addr_lo), 0, indirect);
399 			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
400 				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
401 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
402 				 inst_idx].tmr_mc_addr_hi), 0, indirect);
403 			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
404 				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
405 		} else {
406 			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
407 				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
408 			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
409 				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
410 			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
411 				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
412 		}
413 		offset = 0;
414 	} else {
415 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
416 			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
417 			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
418 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
419 			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
420 			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
421 		offset = size;
422 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
423 			VCN, 0, regUVD_VCPU_CACHE_OFFSET0),
424 			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
425 	}
426 
427 	if (!indirect)
428 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
429 			VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
430 	else
431 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
432 			VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
433 
434 	/* cache window 1: stack */
435 	if (!indirect) {
436 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
437 			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
438 			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
439 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
440 			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
441 			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
442 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
443 			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
444 	} else {
445 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
446 			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
447 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
448 			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
449 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
450 			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
451 	}
452 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
453 			VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
454 
455 	/* cache window 2: context */
456 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
457 		VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
458 		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
459 			AMDGPU_VCN_STACK_SIZE), 0, indirect);
460 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
461 		VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
462 		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
463 			AMDGPU_VCN_STACK_SIZE), 0, indirect);
464 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
465 		VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
466 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
467 		VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
468 
469 	/* non-cache window */
470 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
471 		VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
472 		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
473 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
474 		VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
475 		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
476 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
477 		VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
478 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
479 		VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
480 		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)), 0, indirect);
481 
482 	/* VCN global tiling registers */
483 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
484 		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
485 }
486 
487 /**
488  * vcn_v5_0_2_disable_clock_gating - disable VCN clock gating
489  *
490  * @vinst: VCN instance
491  *
492  * Disable clock gating for VCN block
493  */
494 static void vcn_v5_0_2_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
495 {
496 }
497 
498 /**
499  * vcn_v5_0_2_enable_clock_gating - enable VCN clock gating
500  *
501  * @vinst: VCN instance
502  *
503  * Enable clock gating for VCN block
504  */
505 static void vcn_v5_0_2_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
506 {
507 }
508 
509 /**
510  * vcn_v5_0_2_pause_dpg_mode - VCN pause with dpg mode
511  *
512  * @vinst: VCN instance
513  * @new_state: pause state
514  *
515  * Pause dpg mode for VCN block
516  */
517 static int vcn_v5_0_2_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
518 				struct dpg_pause_state *new_state)
519 {
520 	struct amdgpu_device *adev = vinst->adev;
521 	uint32_t reg_data = 0;
522 	int vcn_inst;
523 
524 	vcn_inst = GET_INST(VCN, vinst->inst);
525 
526 	/* pause/unpause if state is changed */
527 	if (vinst->pause_state.fw_based != new_state->fw_based) {
528 		DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d %s\n",
529 			vinst->pause_state.fw_based, new_state->fw_based,
530 			new_state->fw_based ? "VCN_DPG_STATE__PAUSE" : "VCN_DPG_STATE__UNPAUSE");
531 		reg_data = RREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE) &
532 				(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
533 
534 		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
535 			/* pause DPG */
536 			reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
537 			WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data);
538 
539 			/* wait for ACK */
540 			SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_DPG_PAUSE,
541 					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
542 					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
543 		} else {
544 			/* unpause DPG, no need to wait */
545 			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
546 			WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data);
547 		}
548 		vinst->pause_state.fw_based = new_state->fw_based;
549 	}
550 
551 	return 0;
552 }
553 
554 /**
555  * vcn_v5_0_2_start_dpg_mode - VCN start with dpg mode
556  *
557  * @vinst: VCN instance
558  * @indirect: indirectly write sram
559  *
560  * Start VCN block with dpg mode
561  */
562 static int vcn_v5_0_2_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
563 				     bool indirect)
564 {
565 	struct amdgpu_device *adev = vinst->adev;
566 	int inst_idx = vinst->inst;
567 	struct amdgpu_vcn5_fw_shared *fw_shared =
568 		adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
569 	struct amdgpu_ring *ring;
570 	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE};
571 	int vcn_inst, ret;
572 	uint32_t tmp;
573 
574 	vcn_inst = GET_INST(VCN, inst_idx);
575 
576 	/* disable register anti-hang mechanism */
577 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1,
578 		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
579 
580 	/* enable dynamic power gating mode */
581 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS);
582 	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
583 	WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp);
584 
585 	if (indirect) {
586 		adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
587 			(uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
588 		/* Use dummy register 0xDEADBEEF passing AID selection to PSP FW */
589 		WREG32_SOC24_DPG_MODE(inst_idx, 0xDEADBEEF,
590 				adev->vcn.inst[inst_idx].aid_id, 0, true);
591 	}
592 
593 	/* enable VCPU clock */
594 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
595 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
596 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
597 		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);
598 
599 	/* disable master interrupt */
600 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
601 		VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect);
602 
603 	/* setup regUVD_LMI_CTRL */
604 	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
605 		UVD_LMI_CTRL__REQ_MODE_MASK |
606 		UVD_LMI_CTRL__CRC_RESET_MASK |
607 		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
608 		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
609 		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
610 		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
611 		0x00100000L);
612 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
613 		VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);
614 
615 	vcn_v5_0_2_mc_resume_dpg_mode(vinst, indirect);
616 
617 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
618 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
619 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
620 		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);
621 
622 	/* enable LMI MC and UMC channels */
623 	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
624 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
625 		VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect);
626 
627 	/* enable master interrupt */
628 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
629 		VCN, 0, regUVD_MASTINT_EN),
630 		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
631 
632 	if (indirect) {
633 		ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
634 		if (ret) {
635 			dev_err(adev->dev, "vcn sram load failed %d\n", ret);
636 			return ret;
637 		}
638 	}
639 
640 	/* resetting ring, fw should not check RB ring */
641 	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
642 
643 	/* Pause dpg */
644 	vcn_v5_0_2_pause_dpg_mode(vinst, &state);
645 
646 	ring = &adev->vcn.inst[inst_idx].ring_enc[0];
647 
648 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
649 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
650 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t));
651 
652 	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
653 	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
654 	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
655 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
656 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
657 
658 	ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
659 
660 	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
661 	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
662 	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
663 	/* resetting done, fw can check RB ring */
664 	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
665 
666 	WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
667 		ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
668 		VCN_RB1_DB_CTRL__EN_MASK);
669 	/* Read DB_CTRL to flush the write DB_CTRL command. */
670 	RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
671 
672 	return 0;
673 }
674 
675 /**
676  * vcn_v5_0_2_start - VCN start
677  *
678  * @vinst: VCN instance
679  *
680  * Start VCN block
681  */
682 static int vcn_v5_0_2_start(struct amdgpu_vcn_inst *vinst)
683 {
684 	struct amdgpu_device *adev = vinst->adev;
685 	int i = vinst->inst;
686 	struct amdgpu_vcn5_fw_shared *fw_shared;
687 	struct amdgpu_ring *ring;
688 	uint32_t tmp;
689 	int j, k, r, vcn_inst;
690 
691 	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
692 
693 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
694 		return vcn_v5_0_2_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
695 
696 	vcn_inst = GET_INST(VCN, i);
697 
698 	/* set VCN status busy */
699 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
700 	WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
701 
702 	/* enable VCPU clock */
703 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
704 		 UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
705 
706 	/* disable master interrupt */
707 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
708 		 ~UVD_MASTINT_EN__VCPU_EN_MASK);
709 
710 	/* enable LMI MC and UMC channels */
711 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
712 		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
713 
714 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
715 	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
716 	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
717 	WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
718 
719 	/* setup regUVD_LMI_CTRL */
720 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
721 	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL, tmp |
722 		     UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
723 		     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
724 		     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
725 		     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
726 
727 	vcn_v5_0_2_mc_resume(vinst);
728 
729 	/* VCN global tiling registers */
730 	WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
731 		     adev->gfx.config.gb_addr_config);
732 
733 	/* unblock VCPU register access */
734 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
735 		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
736 
737 	/* release VCPU reset to boot */
738 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
739 		 ~UVD_VCPU_CNTL__BLK_RST_MASK);
740 
741 	for (j = 0; j < 10; ++j) {
742 		uint32_t status;
743 
744 		for (k = 0; k < 100; ++k) {
745 			status = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
746 			if (status & 2)
747 				break;
748 			mdelay(1000);
749 			if (amdgpu_emu_mode == 1)
750 				msleep(520);
751 		}
752 
753 		if (amdgpu_emu_mode == 1) {
754 			r = -1;
755 			if (status & 2) {
756 				r = 0;
757 				break;
758 			}
759 		} else {
760 			r = 0;
761 			if (status & 2)
762 				break;
763 
764 			dev_err(adev->dev,
765 				"VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
766 			WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
767 				 UVD_VCPU_CNTL__BLK_RST_MASK,
768 				 ~UVD_VCPU_CNTL__BLK_RST_MASK);
769 			mdelay(10);
770 			WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
771 				 ~UVD_VCPU_CNTL__BLK_RST_MASK);
772 
773 			mdelay(10);
774 			r = -1;
775 		}
776 	}
777 
778 	if (r) {
779 		dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
780 		return r;
781 	}
782 
783 	/* enable master interrupt */
784 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
785 		 UVD_MASTINT_EN__VCPU_EN_MASK,
786 		 ~UVD_MASTINT_EN__VCPU_EN_MASK);
787 
788 	/* clear the busy bit of VCN_STATUS */
789 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
790 		 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
791 
792 	ring = &adev->vcn.inst[i].ring_enc[0];
793 
794 	WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
795 		     ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
796 		     VCN_RB1_DB_CTRL__EN_MASK);
797 
798 	/* Read DB_CTRL to flush the write DB_CTRL command. */
799 	RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
800 
801 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, ring->gpu_addr);
802 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
803 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / 4);
804 
805 	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
806 	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
807 	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
808 	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
809 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
810 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
811 
812 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
813 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
814 	ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
815 
816 	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
817 	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
818 	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
819 	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
820 
821 	return 0;
822 }
823 
824 /**
825  * vcn_v5_0_2_stop_dpg_mode - VCN stop with dpg mode
826  *
827  * @vinst: VCN instance
828  *
829  * Stop VCN block with dpg mode
830  */
831 static void vcn_v5_0_2_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
832 {
833 	struct amdgpu_device *adev = vinst->adev;
834 	int inst_idx = vinst->inst;
835 	uint32_t tmp;
836 	int vcn_inst;
837 	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
838 
839 	vcn_inst = GET_INST(VCN, inst_idx);
840 
841 	/* Unpause dpg */
842 	vcn_v5_0_2_pause_dpg_mode(vinst, &state);
843 
844 	/* Wait for power status to be 1 */
845 	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
846 		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
847 
848 	/* wait for read ptr to be equal to write ptr */
849 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
850 	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);
851 
852 	/* disable dynamic power gating mode */
853 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
854 		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
855 }
856 
857 /**
858  * vcn_v5_0_2_stop - VCN stop
859  *
860  * @vinst: VCN instance
861  *
862  * Stop VCN block
863  */
864 static int vcn_v5_0_2_stop(struct amdgpu_vcn_inst *vinst)
865 {
866 	struct amdgpu_device *adev = vinst->adev;
867 	int i = vinst->inst;
868 	struct amdgpu_vcn5_fw_shared *fw_shared;
869 	uint32_t tmp;
870 	int r = 0, vcn_inst;
871 
872 	vcn_inst = GET_INST(VCN, i);
873 
874 	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
875 	fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
876 
877 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
878 		vcn_v5_0_2_stop_dpg_mode(vinst);
879 		return 0;
880 	}
881 
882 	/* wait for vcn idle */
883 	r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
884 	if (r)
885 		return r;
886 
887 	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
888 		UVD_LMI_STATUS__READ_CLEAN_MASK |
889 		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
890 		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
891 	r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
892 	if (r)
893 		return r;
894 
895 	/* disable LMI UMC channel */
896 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
897 	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
898 	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
899 	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
900 		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
901 	r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
902 	if (r)
903 		return r;
904 
905 	/* block VCPU register access */
906 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
907 		 UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
908 		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
909 
910 	/* reset VCPU */
911 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
912 		 UVD_VCPU_CNTL__BLK_RST_MASK,
913 		 ~UVD_VCPU_CNTL__BLK_RST_MASK);
914 
915 	/* disable VCPU clock */
916 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
917 		 ~(UVD_VCPU_CNTL__CLK_EN_MASK));
918 
919 	/* apply soft reset */
920 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
921 	tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
922 	WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
923 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
924 	tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
925 	WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
926 
927 	/* clear status */
928 	WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
929 
930 	return 0;
931 }
932 
933 /**
934  * vcn_v5_0_2_unified_ring_get_rptr - get unified read pointer
935  *
936  * @ring: amdgpu_ring pointer
937  *
938  * Returns the current hardware unified read pointer
939  */
940 static uint64_t vcn_v5_0_2_unified_ring_get_rptr(struct amdgpu_ring *ring)
941 {
942 	struct amdgpu_device *adev = ring->adev;
943 
944 	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
945 		DRM_ERROR("wrong ring id is identified in %s", __func__);
946 
947 	return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR);
948 }
949 
950 /**
951  * vcn_v5_0_2_unified_ring_get_wptr - get unified write pointer
952  *
953  * @ring: amdgpu_ring pointer
954  *
955  * Returns the current hardware unified write pointer
956  */
957 static uint64_t vcn_v5_0_2_unified_ring_get_wptr(struct amdgpu_ring *ring)
958 {
959 	struct amdgpu_device *adev = ring->adev;
960 
961 	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
962 		DRM_ERROR("wrong ring id is identified in %s", __func__);
963 
964 	if (ring->use_doorbell)
965 		return *ring->wptr_cpu_addr;
966 	else
967 		return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR);
968 }
969 
970 /**
971  * vcn_v5_0_2_unified_ring_set_wptr - set enc write pointer
972  *
973  * @ring: amdgpu_ring pointer
974  *
975  * Commits the enc write pointer to the hardware
976  */
977 static void vcn_v5_0_2_unified_ring_set_wptr(struct amdgpu_ring *ring)
978 {
979 	struct amdgpu_device *adev = ring->adev;
980 
981 	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
982 		DRM_ERROR("wrong ring id is identified in %s", __func__);
983 
984 	if (ring->use_doorbell) {
985 		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
986 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
987 	} else {
988 		WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR,
989 				lower_32_bits(ring->wptr));
990 	}
991 }
992 
993 static const struct amdgpu_ring_funcs vcn_v5_0_2_unified_ring_vm_funcs = {
994 	.type = AMDGPU_RING_TYPE_VCN_ENC,
995 	.align_mask = 0x3f,
996 	.nop = VCN_ENC_CMD_NO_OP,
997 	.get_rptr = vcn_v5_0_2_unified_ring_get_rptr,
998 	.get_wptr = vcn_v5_0_2_unified_ring_get_wptr,
999 	.set_wptr = vcn_v5_0_2_unified_ring_set_wptr,
1000 	.emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1001 			   SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1002 			   4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
1003 			   5 +
1004 			   5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
1005 			   1, /* vcn_v2_0_enc_ring_insert_end */
1006 	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
1007 	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
1008 	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
1009 	.emit_vm_flush = vcn_v4_0_3_enc_ring_emit_vm_flush,
1010 	.emit_hdp_flush = vcn_v4_0_3_ring_emit_hdp_flush,
1011 	.test_ring = amdgpu_vcn_enc_ring_test_ring,
1012 	.test_ib = amdgpu_vcn_unified_ring_test_ib,
1013 	.insert_nop = amdgpu_ring_insert_nop,
1014 	.insert_end = vcn_v2_0_enc_ring_insert_end,
1015 	.pad_ib = amdgpu_ring_generic_pad_ib,
1016 	.begin_use = amdgpu_vcn_ring_begin_use,
1017 	.end_use = amdgpu_vcn_ring_end_use,
1018 	.emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
1019 	.emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
1020 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1021 };
1022 
1023 /**
1024  * vcn_v5_0_2_set_unified_ring_funcs - set unified ring functions
1025  *
1026  * @adev: amdgpu_device pointer
1027  *
1028  * Set unified ring functions
1029  */
1030 static void vcn_v5_0_2_set_unified_ring_funcs(struct amdgpu_device *adev)
1031 {
1032 	int i, vcn_inst;
1033 
1034 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1035 		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_2_unified_ring_vm_funcs;
1036 		adev->vcn.inst[i].ring_enc[0].me = i;
1037 		vcn_inst = GET_INST(VCN, i);
1038 		adev->vcn.inst[i].aid_id = vcn_inst / adev->vcn.num_inst_per_aid;
1039 	}
1040 }
1041 
1042 /**
1043  * vcn_v5_0_2_is_idle - check VCN block is idle
1044  *
1045  * @ip_block: Pointer to the amdgpu_ip_block structure
1046  *
1047  * Check whether VCN block is idle
1048  */
1049 static bool vcn_v5_0_2_is_idle(struct amdgpu_ip_block *ip_block)
1050 {
1051 	struct amdgpu_device *adev = ip_block->adev;
1052 	int i, ret = 1;
1053 
1054 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
1055 		ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) == UVD_STATUS__IDLE);
1056 
1057 	return ret;
1058 }
1059 
1060 /**
1061  * vcn_v5_0_2_wait_for_idle - wait for VCN block idle
1062  *
1063  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
1064  *
1065  * Wait for VCN block idle
1066  */
1067 static int vcn_v5_0_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
1068 {
1069 	struct amdgpu_device *adev = ip_block->adev;
1070 	int i, ret = 0;
1071 
1072 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1073 		ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS, UVD_STATUS__IDLE,
1074 			UVD_STATUS__IDLE);
1075 		if (ret)
1076 			return ret;
1077 	}
1078 
1079 	return ret;
1080 }
1081 
1082 /**
1083  * vcn_v5_0_2_set_clockgating_state - set VCN block clockgating state
1084  *
1085  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
1086  * @state: clock gating state
1087  *
1088  * Set VCN block clockgating state
1089  */
1090 static int vcn_v5_0_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
1091 					    enum amd_clockgating_state state)
1092 {
1093 	struct amdgpu_device *adev = ip_block->adev;
1094 	bool enable = state == AMD_CG_STATE_GATE;
1095 	int i;
1096 
1097 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1098 		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
1099 
1100 		if (enable) {
1101 			if (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) != UVD_STATUS__IDLE)
1102 				return -EBUSY;
1103 			vcn_v5_0_2_enable_clock_gating(vinst);
1104 		} else {
1105 			vcn_v5_0_2_disable_clock_gating(vinst);
1106 		}
1107 	}
1108 
1109 	return 0;
1110 }
1111 
1112 static int vcn_v5_0_2_set_pg_state(struct amdgpu_vcn_inst *vinst,
1113 				   enum amd_powergating_state state)
1114 {
1115 	int ret = 0;
1116 
1117 	if (state == vinst->cur_state)
1118 		return 0;
1119 
1120 	if (state == AMD_PG_STATE_GATE)
1121 		ret = vcn_v5_0_2_stop(vinst);
1122 	else
1123 		ret = vcn_v5_0_2_start(vinst);
1124 
1125 	if (!ret)
1126 		vinst->cur_state = state;
1127 
1128 	return ret;
1129 }
1130 
1131 /**
1132  * vcn_v5_0_2_process_interrupt - process VCN block interrupt
1133  *
1134  * @adev: amdgpu_device pointer
1135  * @source: interrupt sources
1136  * @entry: interrupt entry from clients and sources
1137  *
1138  * Process VCN block interrupt
1139  */
1140 static int vcn_v5_0_2_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
1141 	struct amdgpu_iv_entry *entry)
1142 {
1143 	uint32_t i, inst;
1144 
1145 	i = node_id_to_phys_map[entry->node_id];
1146 
1147 	DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n");
1148 
1149 	for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst)
1150 		if (adev->vcn.inst[inst].aid_id == i)
1151 			break;
1152 
1153 	if (inst >= adev->vcn.num_vcn_inst) {
1154 		dev_WARN_ONCE(adev->dev, 1,
1155 				"Interrupt received for unknown VCN instance %d",
1156 				entry->node_id);
1157 		return 0;
1158 	}
1159 
1160 	switch (entry->src_id) {
1161 	case VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
1162 		amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]);
1163 		break;
1164 	default:
1165 		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
1166 			  entry->src_id, entry->src_data[0]);
1167 		break;
1168 	}
1169 
1170 	return 0;
1171 }
1172 
1173 static const struct amdgpu_irq_src_funcs vcn_v5_0_2_irq_funcs = {
1174 	.process = vcn_v5_0_2_process_interrupt,
1175 };
1176 
1177 /**
1178  * vcn_v5_0_2_set_irq_funcs - set VCN block interrupt irq functions
1179  *
1180  * @adev: amdgpu_device pointer
1181  *
1182  * Set VCN block interrupt irq functions
1183  */
1184 static void vcn_v5_0_2_set_irq_funcs(struct amdgpu_device *adev)
1185 {
1186 	int i;
1187 
1188 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
1189 		adev->vcn.inst->irq.num_types++;
1190 	adev->vcn.inst->irq.funcs = &vcn_v5_0_2_irq_funcs;
1191 }
1192 
1193 static const struct amd_ip_funcs vcn_v5_0_2_ip_funcs = {
1194 	.name = "vcn_v5_0_2",
1195 	.early_init = vcn_v5_0_2_early_init,
1196 	.late_init = NULL,
1197 	.sw_init = vcn_v5_0_2_sw_init,
1198 	.sw_fini = vcn_v5_0_2_sw_fini,
1199 	.hw_init = vcn_v5_0_2_hw_init,
1200 	.hw_fini = vcn_v5_0_2_hw_fini,
1201 	.suspend = vcn_v5_0_2_suspend,
1202 	.resume = vcn_v5_0_2_resume,
1203 	.is_idle = vcn_v5_0_2_is_idle,
1204 	.wait_for_idle = vcn_v5_0_2_wait_for_idle,
1205 	.check_soft_reset = NULL,
1206 	.pre_soft_reset = NULL,
1207 	.soft_reset = NULL,
1208 	.post_soft_reset = NULL,
1209 	.set_clockgating_state = vcn_v5_0_2_set_clockgating_state,
1210 	.set_powergating_state = vcn_set_powergating_state,
1211 };
1212 
1213 const struct amdgpu_ip_block_version vcn_v5_0_2_ip_block = {
1214 	.type = AMD_IP_BLOCK_TYPE_VCN,
1215 	.major = 5,
1216 	.minor = 0,
1217 	.rev = 2,
1218 	.funcs = &vcn_v5_0_2_ip_funcs,
1219 };
1220