xref: /linux/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_2.c (revision 4a57e0913e8c7fff407e97909f4ae48caa84d612)
1*8433398cSSonny Jiang /*
2*8433398cSSonny Jiang  * Copyright 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
3*8433398cSSonny Jiang  *
4*8433398cSSonny Jiang  * Permission is hereby granted, free of charge, to any person obtaining a
5*8433398cSSonny Jiang  * copy of this software and associated documentation files (the "Software"),
6*8433398cSSonny Jiang  * to deal in the Software without restriction, including without limitation
7*8433398cSSonny Jiang  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8*8433398cSSonny Jiang  * and/or sell copies of the Software, and to permit persons to whom the
9*8433398cSSonny Jiang  * Software is furnished to do so, subject to the following conditions:
10*8433398cSSonny Jiang  *
11*8433398cSSonny Jiang  * The above copyright notice and this permission notice shall be included in
12*8433398cSSonny Jiang  * all copies or substantial portions of the Software.
13*8433398cSSonny Jiang  *
14*8433398cSSonny Jiang  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15*8433398cSSonny Jiang  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16*8433398cSSonny Jiang  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17*8433398cSSonny Jiang  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18*8433398cSSonny Jiang  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19*8433398cSSonny Jiang  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20*8433398cSSonny Jiang  * OTHER DEALINGS IN THE SOFTWARE.
21*8433398cSSonny Jiang  *
22*8433398cSSonny Jiang  */
23*8433398cSSonny Jiang 
24*8433398cSSonny Jiang #include <linux/firmware.h>
25*8433398cSSonny Jiang #include "amdgpu.h"
26*8433398cSSonny Jiang #include "amdgpu_vcn.h"
27*8433398cSSonny Jiang #include "amdgpu_pm.h"
28*8433398cSSonny Jiang #include "soc15.h"
29*8433398cSSonny Jiang #include "soc15d.h"
30*8433398cSSonny Jiang #include "soc15_hw_ip.h"
31*8433398cSSonny Jiang #include "vcn_v2_0.h"
32*8433398cSSonny Jiang #include "vcn_v4_0_3.h"
33*8433398cSSonny Jiang 
34*8433398cSSonny Jiang #include "vcn/vcn_5_0_0_offset.h"
35*8433398cSSonny Jiang #include "vcn/vcn_5_0_0_sh_mask.h"
36*8433398cSSonny Jiang #include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
37*8433398cSSonny Jiang #include "vcn_v5_0_0.h"
38*8433398cSSonny Jiang #include "vcn_v5_0_1.h"
39*8433398cSSonny Jiang #include "vcn_v5_0_2.h"
40*8433398cSSonny Jiang 
41*8433398cSSonny Jiang #include <drm/drm_drv.h>
42*8433398cSSonny Jiang 
43*8433398cSSonny Jiang static void vcn_v5_0_2_set_unified_ring_funcs(struct amdgpu_device *adev);
44*8433398cSSonny Jiang static void vcn_v5_0_2_set_irq_funcs(struct amdgpu_device *adev);
45*8433398cSSonny Jiang static int vcn_v5_0_2_set_pg_state(struct amdgpu_vcn_inst *vinst,
46*8433398cSSonny Jiang 				   enum amd_powergating_state state);
47*8433398cSSonny Jiang static void vcn_v5_0_2_unified_ring_set_wptr(struct amdgpu_ring *ring);
48*8433398cSSonny Jiang 
49*8433398cSSonny Jiang /**
50*8433398cSSonny Jiang  * vcn_v5_0_2_early_init - set function pointers and load microcode
51*8433398cSSonny Jiang  *
52*8433398cSSonny Jiang  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
53*8433398cSSonny Jiang  *
54*8433398cSSonny Jiang  * Set ring and irq function pointers
55*8433398cSSonny Jiang  * Load microcode from filesystem
56*8433398cSSonny Jiang  */
57*8433398cSSonny Jiang static int vcn_v5_0_2_early_init(struct amdgpu_ip_block *ip_block)
58*8433398cSSonny Jiang {
59*8433398cSSonny Jiang 	struct amdgpu_device *adev = ip_block->adev;
60*8433398cSSonny Jiang 	int i, r;
61*8433398cSSonny Jiang 
62*8433398cSSonny Jiang 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
63*8433398cSSonny Jiang 		/* re-use enc ring as unified ring */
64*8433398cSSonny Jiang 		adev->vcn.inst[i].num_enc_rings = 1;
65*8433398cSSonny Jiang 
66*8433398cSSonny Jiang 	vcn_v5_0_2_set_unified_ring_funcs(adev);
67*8433398cSSonny Jiang 	vcn_v5_0_2_set_irq_funcs(adev);
68*8433398cSSonny Jiang 
69*8433398cSSonny Jiang 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
70*8433398cSSonny Jiang 		adev->vcn.inst[i].set_pg_state = vcn_v5_0_2_set_pg_state;
71*8433398cSSonny Jiang 
72*8433398cSSonny Jiang 		r = amdgpu_vcn_early_init(adev, i);
73*8433398cSSonny Jiang 		if (r)
74*8433398cSSonny Jiang 			return r;
75*8433398cSSonny Jiang 	}
76*8433398cSSonny Jiang 
77*8433398cSSonny Jiang 	return 0;
78*8433398cSSonny Jiang }
79*8433398cSSonny Jiang 
80*8433398cSSonny Jiang static void vcn_v5_0_2_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
81*8433398cSSonny Jiang {
82*8433398cSSonny Jiang 	struct amdgpu_vcn5_fw_shared *fw_shared;
83*8433398cSSonny Jiang 
84*8433398cSSonny Jiang 	fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
85*8433398cSSonny Jiang 
86*8433398cSSonny Jiang 	if (fw_shared->sq.is_enabled)
87*8433398cSSonny Jiang 		return;
88*8433398cSSonny Jiang 	fw_shared->present_flag_0 =
89*8433398cSSonny Jiang 		cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
90*8433398cSSonny Jiang 	fw_shared->sq.is_enabled = 1;
91*8433398cSSonny Jiang 
92*8433398cSSonny Jiang 	if (amdgpu_vcnfw_log)
93*8433398cSSonny Jiang 		amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);
94*8433398cSSonny Jiang }
95*8433398cSSonny Jiang 
/**
 * vcn_v5_0_2_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int vcn_v5_0_2_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, r, vcn_inst;

	/* VCN UNIFIED TRAP: registered once, against the first instance's
	 * irq entry.
	 */
	r = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_VCN,
		VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		vcn_inst = GET_INST(VCN, i);

		r = amdgpu_vcn_sw_init(adev, i);
		if (r)
			return r;

		amdgpu_vcn_setup_ucode(adev, i);

		r = amdgpu_vcn_resume(adev, i);
		if (r)
			return r;

		/* One doorbell-driven unified (enc) ring per instance. */
		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->use_doorbell = true;

		/* NOTE(review): 32-doorbell stride per instance here vs. an
		 * 11-doorbell stride in vcn_v5_0_2_hw_init()'s
		 * vcn_doorbell_range() call - confirm the mismatch is
		 * intentional.
		 */
		ring->doorbell_index =
			(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 32 * vcn_inst;

		ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
		sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);

		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
					AMDGPU_RING_PRIO_DEFAULT, &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		vcn_v5_0_2_fw_shared_init(adev, i);
	}

	/* TODO: Add queue reset mask when FW fully supports it */
	adev->vcn.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);

	return amdgpu_vcn_sysfs_reset_mask_init(adev);
}
151*8433398cSSonny Jiang 
/**
 * vcn_v5_0_2_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v5_0_2_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r, idx;

	/* Clear the fw_shared flags only while the DRM device is still
	 * present; drm_dev_enter() fails after unplug and the writes are
	 * skipped.
	 */
	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			struct amdgpu_vcn5_fw_shared *fw_shared;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = 0;
		}

		drm_dev_exit(idx);
	}

	/* Suspend all instances before tearing down their sw state. */
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(adev, i);
		if (r)
			return r;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		amdgpu_vcn_sw_fini(adev, i);

	amdgpu_vcn_sysfs_reset_mask_fini(adev);

	kfree(adev->vcn.ip_dump); /* TODO(review): confirm ip_dump is ever allocated for this IP version */

	return 0;
}
191*8433398cSSonny Jiang 
/**
 * vcn_v5_0_2_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v5_0_2_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, r, vcn_inst;
	uint32_t tmp;

	/* Bit 9 (0x200) of VCN_RRMT_CNTL on the first instance gates the
	 * RRMT_ENABLED capability for the whole device.
	 */
	if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x200)
		adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		vcn_inst = GET_INST(VCN, i);
		ring = &adev->vcn.inst[i].ring_enc[0];

		/* Remove Video Tiles antihang mechanism */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS);
		tmp &= (~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
		WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp);

		/* NOTE(review): 11-doorbell stride per instance here vs. the
		 * 32-doorbell stride used for ring->doorbell_index in
		 * vcn_v5_0_2_sw_init() - confirm the mismatch is intentional.
		 */
		if (ring->use_doorbell)
			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
				((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				 11 * vcn_inst),
				adev->vcn.inst[i].aid_id);

		/* Re-init fw_shared, if required */
		vcn_v5_0_2_fw_shared_init(adev, i);

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	return 0;
}
233*8433398cSSonny Jiang 
234*8433398cSSonny Jiang /**
235*8433398cSSonny Jiang  * vcn_v5_0_2_hw_fini - stop the hardware block
236*8433398cSSonny Jiang  *
237*8433398cSSonny Jiang  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
238*8433398cSSonny Jiang  *
239*8433398cSSonny Jiang  * Stop the VCN block, mark ring as not ready any more
240*8433398cSSonny Jiang  */
241*8433398cSSonny Jiang static int vcn_v5_0_2_hw_fini(struct amdgpu_ip_block *ip_block)
242*8433398cSSonny Jiang {
243*8433398cSSonny Jiang 	struct amdgpu_device *adev = ip_block->adev;
244*8433398cSSonny Jiang 	int i;
245*8433398cSSonny Jiang 
246*8433398cSSonny Jiang 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
247*8433398cSSonny Jiang 		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
248*8433398cSSonny Jiang 
249*8433398cSSonny Jiang 		cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work);
250*8433398cSSonny Jiang 		if (vinst->cur_state != AMD_PG_STATE_GATE)
251*8433398cSSonny Jiang 			vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
252*8433398cSSonny Jiang 	}
253*8433398cSSonny Jiang 
254*8433398cSSonny Jiang 	return 0;
255*8433398cSSonny Jiang }
256*8433398cSSonny Jiang 
257*8433398cSSonny Jiang /**
258*8433398cSSonny Jiang  * vcn_v5_0_2_suspend - suspend VCN block
259*8433398cSSonny Jiang  *
260*8433398cSSonny Jiang  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
261*8433398cSSonny Jiang  *
262*8433398cSSonny Jiang  * HW fini and suspend VCN block
263*8433398cSSonny Jiang  */
264*8433398cSSonny Jiang static int vcn_v5_0_2_suspend(struct amdgpu_ip_block *ip_block)
265*8433398cSSonny Jiang {
266*8433398cSSonny Jiang 	struct amdgpu_device *adev = ip_block->adev;
267*8433398cSSonny Jiang 	int r, i;
268*8433398cSSonny Jiang 
269*8433398cSSonny Jiang 	r = vcn_v5_0_2_hw_fini(ip_block);
270*8433398cSSonny Jiang 	if (r)
271*8433398cSSonny Jiang 		return r;
272*8433398cSSonny Jiang 
273*8433398cSSonny Jiang 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
274*8433398cSSonny Jiang 		r = amdgpu_vcn_suspend(ip_block->adev, i);
275*8433398cSSonny Jiang 		if (r)
276*8433398cSSonny Jiang 			return r;
277*8433398cSSonny Jiang 	}
278*8433398cSSonny Jiang 
279*8433398cSSonny Jiang 	return r;
280*8433398cSSonny Jiang }
281*8433398cSSonny Jiang 
282*8433398cSSonny Jiang /**
283*8433398cSSonny Jiang  * vcn_v5_0_2_resume - resume VCN block
284*8433398cSSonny Jiang  *
285*8433398cSSonny Jiang  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
286*8433398cSSonny Jiang  *
287*8433398cSSonny Jiang  * Resume firmware and hw init VCN block
288*8433398cSSonny Jiang  */
289*8433398cSSonny Jiang static int vcn_v5_0_2_resume(struct amdgpu_ip_block *ip_block)
290*8433398cSSonny Jiang {
291*8433398cSSonny Jiang 	struct amdgpu_device *adev = ip_block->adev;
292*8433398cSSonny Jiang 	int r, i;
293*8433398cSSonny Jiang 
294*8433398cSSonny Jiang 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
295*8433398cSSonny Jiang 		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
296*8433398cSSonny Jiang 
297*8433398cSSonny Jiang 		if (amdgpu_in_reset(adev))
298*8433398cSSonny Jiang 			vinst->cur_state = AMD_PG_STATE_GATE;
299*8433398cSSonny Jiang 
300*8433398cSSonny Jiang 		r = amdgpu_vcn_resume(ip_block->adev, i);
301*8433398cSSonny Jiang 		if (r)
302*8433398cSSonny Jiang 			return r;
303*8433398cSSonny Jiang 	}
304*8433398cSSonny Jiang 
305*8433398cSSonny Jiang 	r = vcn_v5_0_2_hw_init(ip_block);
306*8433398cSSonny Jiang 
307*8433398cSSonny Jiang 	return r;
308*8433398cSSonny Jiang }
309*8433398cSSonny Jiang 
/**
 * vcn_v5_0_2_mc_resume - memory controller programming
 *
 * @vinst: VCN instance
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v5_0_2_mc_resume(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t offset, size, vcn_inst;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
	/* page-aligned ucode size; the +8 presumably accounts for trailing
	 * bytes after the image - TODO confirm against the ucode layout */
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	vcn_inst = GET_INST(VCN, inst);
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		/* PSP front-door load: point window 0 at the TMR copy. */
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		/* Direct load: window 0 covers the driver-owned fw BO. */
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0,
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack (immediately after the fw image) */
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context (after the stack) */
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window: the fw_shared area */
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)));
}
372*8433398cSSonny Jiang 
/**
 * vcn_v5_0_2_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @vinst: VCN instance
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v5_0_2_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
					  bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
	/* page-aligned ucode size; the +8 presumably accounts for trailing
	 * bytes after the image - TODO confirm against the ucode layout */
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			/* Direct register writes: point window 0 at the TMR copy. */
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
				 inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
				 inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			/* Indirect (SRAM) path writes zeros here; presumably
			 * PSP fills in the real TMR addresses - confirm.
			 */
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		/* Direct load: window 0 covers the driver-owned fw BO. */
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack (immediately after the fw image) */
	if (!indirect) {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context (after the stack) */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window: the fw_shared area */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
486*8433398cSSonny Jiang 
/**
 * vcn_v5_0_2_disable_clock_gating - disable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Disable clock gating for VCN block
 */
static void vcn_v5_0_2_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	/* Intentionally empty: no clock-gating programming is implemented
	 * for this IP version.
	 */
}
497*8433398cSSonny Jiang 
/**
 * vcn_v5_0_2_enable_clock_gating - enable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Enable clock gating for VCN block
 */
static void vcn_v5_0_2_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	/* Intentionally empty: no clock-gating programming is implemented
	 * for this IP version.
	 */
}
508*8433398cSSonny Jiang 
/**
 * vcn_v5_0_2_pause_dpg_mode - VCN pause with dpg mode
 *
 * @vinst: VCN instance
 * @new_state: pause state
 *
 * Pause dpg mode for VCN block
 */
static int vcn_v5_0_2_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				struct dpg_pause_state *new_state)
{
	struct amdgpu_device *adev = vinst->adev;
	uint32_t reg_data = 0;
	int vcn_inst;

	vcn_inst = GET_INST(VCN, vinst->inst);

	/* pause/unpause if state is changed */
	if (vinst->pause_state.fw_based != new_state->fw_based) {
		DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d %s\n",
			vinst->pause_state.fw_based, new_state->fw_based,
			new_state->fw_based ? "VCN_DPG_STATE__PAUSE" : "VCN_DPG_STATE__UNPAUSE");
		/* Read-modify-write with the ACK bit masked out so it is
		 * never written back as a request.
		 */
		reg_data = RREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE) &
				(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			/* pause DPG */
			reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data);

			/* wait for ACK */
			SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
		} else {
			/* unpause DPG, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data);
		}
		/* Record the new state only after the hardware handshake. */
		vinst->pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}
553*8433398cSSonny Jiang 
554*8433398cSSonny Jiang /**
555*8433398cSSonny Jiang  * vcn_v5_0_2_start_dpg_mode - VCN start with dpg mode
556*8433398cSSonny Jiang  *
557*8433398cSSonny Jiang  * @vinst: VCN instance
558*8433398cSSonny Jiang  * @indirect: indirectly write sram
559*8433398cSSonny Jiang  *
560*8433398cSSonny Jiang  * Start VCN block with dpg mode
561*8433398cSSonny Jiang  */
562*8433398cSSonny Jiang static int vcn_v5_0_2_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
563*8433398cSSonny Jiang 				     bool indirect)
564*8433398cSSonny Jiang {
565*8433398cSSonny Jiang 	struct amdgpu_device *adev = vinst->adev;
566*8433398cSSonny Jiang 	int inst_idx = vinst->inst;
567*8433398cSSonny Jiang 	struct amdgpu_vcn5_fw_shared *fw_shared =
568*8433398cSSonny Jiang 		adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
569*8433398cSSonny Jiang 	struct amdgpu_ring *ring;
570*8433398cSSonny Jiang 	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE};
571*8433398cSSonny Jiang 	int vcn_inst, ret;
572*8433398cSSonny Jiang 	uint32_t tmp;
573*8433398cSSonny Jiang 
574*8433398cSSonny Jiang 	vcn_inst = GET_INST(VCN, inst_idx);
575*8433398cSSonny Jiang 
576*8433398cSSonny Jiang 	/* disable register anti-hang mechanism */
577*8433398cSSonny Jiang 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1,
578*8433398cSSonny Jiang 		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
579*8433398cSSonny Jiang 
580*8433398cSSonny Jiang 	/* enable dynamic power gating mode */
581*8433398cSSonny Jiang 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS);
582*8433398cSSonny Jiang 	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
583*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp);
584*8433398cSSonny Jiang 
585*8433398cSSonny Jiang 	if (indirect) {
586*8433398cSSonny Jiang 		adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
587*8433398cSSonny Jiang 			(uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
588*8433398cSSonny Jiang 		/* Use dummy register 0xDEADBEEF passing AID selection to PSP FW */
589*8433398cSSonny Jiang 		WREG32_SOC24_DPG_MODE(inst_idx, 0xDEADBEEF,
590*8433398cSSonny Jiang 				adev->vcn.inst[inst_idx].aid_id, 0, true);
591*8433398cSSonny Jiang 	}
592*8433398cSSonny Jiang 
593*8433398cSSonny Jiang 	/* enable VCPU clock */
594*8433398cSSonny Jiang 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
595*8433398cSSonny Jiang 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
596*8433398cSSonny Jiang 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
597*8433398cSSonny Jiang 		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);
598*8433398cSSonny Jiang 
599*8433398cSSonny Jiang 	/* disable master interrupt */
600*8433398cSSonny Jiang 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
601*8433398cSSonny Jiang 		VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect);
602*8433398cSSonny Jiang 
603*8433398cSSonny Jiang 	/* setup regUVD_LMI_CTRL */
604*8433398cSSonny Jiang 	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
605*8433398cSSonny Jiang 		UVD_LMI_CTRL__REQ_MODE_MASK |
606*8433398cSSonny Jiang 		UVD_LMI_CTRL__CRC_RESET_MASK |
607*8433398cSSonny Jiang 		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
608*8433398cSSonny Jiang 		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
609*8433398cSSonny Jiang 		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
610*8433398cSSonny Jiang 		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
611*8433398cSSonny Jiang 		0x00100000L);
612*8433398cSSonny Jiang 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
613*8433398cSSonny Jiang 		VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);
614*8433398cSSonny Jiang 
615*8433398cSSonny Jiang 	vcn_v5_0_2_mc_resume_dpg_mode(vinst, indirect);
616*8433398cSSonny Jiang 
617*8433398cSSonny Jiang 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
618*8433398cSSonny Jiang 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
619*8433398cSSonny Jiang 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
620*8433398cSSonny Jiang 		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);
621*8433398cSSonny Jiang 
622*8433398cSSonny Jiang 	/* enable LMI MC and UMC channels */
623*8433398cSSonny Jiang 	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
624*8433398cSSonny Jiang 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
625*8433398cSSonny Jiang 		VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect);
626*8433398cSSonny Jiang 
627*8433398cSSonny Jiang 	/* enable master interrupt */
628*8433398cSSonny Jiang 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
629*8433398cSSonny Jiang 		VCN, 0, regUVD_MASTINT_EN),
630*8433398cSSonny Jiang 		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
631*8433398cSSonny Jiang 
632*8433398cSSonny Jiang 	if (indirect) {
633*8433398cSSonny Jiang 		ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
634*8433398cSSonny Jiang 		if (ret) {
635*8433398cSSonny Jiang 			dev_err(adev->dev, "vcn sram load failed %d\n", ret);
636*8433398cSSonny Jiang 			return ret;
637*8433398cSSonny Jiang 		}
638*8433398cSSonny Jiang 	}
639*8433398cSSonny Jiang 
640*8433398cSSonny Jiang 	/* resetting ring, fw should not check RB ring */
641*8433398cSSonny Jiang 	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
642*8433398cSSonny Jiang 
643*8433398cSSonny Jiang 	/* Pause dpg */
644*8433398cSSonny Jiang 	vcn_v5_0_2_pause_dpg_mode(vinst, &state);
645*8433398cSSonny Jiang 
646*8433398cSSonny Jiang 	ring = &adev->vcn.inst[inst_idx].ring_enc[0];
647*8433398cSSonny Jiang 
648*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
649*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
650*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t));
651*8433398cSSonny Jiang 
652*8433398cSSonny Jiang 	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
653*8433398cSSonny Jiang 	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
654*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
655*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
656*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
657*8433398cSSonny Jiang 
658*8433398cSSonny Jiang 	ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
659*8433398cSSonny Jiang 
660*8433398cSSonny Jiang 	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
661*8433398cSSonny Jiang 	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
662*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
663*8433398cSSonny Jiang 	/* resetting done, fw can check RB ring */
664*8433398cSSonny Jiang 	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
665*8433398cSSonny Jiang 
666*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
667*8433398cSSonny Jiang 		ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
668*8433398cSSonny Jiang 		VCN_RB1_DB_CTRL__EN_MASK);
669*8433398cSSonny Jiang 	/* Read DB_CTRL to flush the write DB_CTRL command. */
670*8433398cSSonny Jiang 	RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
671*8433398cSSonny Jiang 
672*8433398cSSonny Jiang 	return 0;
673*8433398cSSonny Jiang }
674*8433398cSSonny Jiang 
675*8433398cSSonny Jiang /**
676*8433398cSSonny Jiang  * vcn_v5_0_2_start - VCN start
677*8433398cSSonny Jiang  *
678*8433398cSSonny Jiang  * @vinst: VCN instance
679*8433398cSSonny Jiang  *
680*8433398cSSonny Jiang  * Start VCN block
681*8433398cSSonny Jiang  */
682*8433398cSSonny Jiang static int vcn_v5_0_2_start(struct amdgpu_vcn_inst *vinst)
683*8433398cSSonny Jiang {
684*8433398cSSonny Jiang 	struct amdgpu_device *adev = vinst->adev;
685*8433398cSSonny Jiang 	int i = vinst->inst;
686*8433398cSSonny Jiang 	struct amdgpu_vcn5_fw_shared *fw_shared;
687*8433398cSSonny Jiang 	struct amdgpu_ring *ring;
688*8433398cSSonny Jiang 	uint32_t tmp;
689*8433398cSSonny Jiang 	int j, k, r, vcn_inst;
690*8433398cSSonny Jiang 
691*8433398cSSonny Jiang 	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
692*8433398cSSonny Jiang 
693*8433398cSSonny Jiang 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
694*8433398cSSonny Jiang 		return vcn_v5_0_2_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
695*8433398cSSonny Jiang 
696*8433398cSSonny Jiang 	vcn_inst = GET_INST(VCN, i);
697*8433398cSSonny Jiang 
698*8433398cSSonny Jiang 	/* set VCN status busy */
699*8433398cSSonny Jiang 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
700*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
701*8433398cSSonny Jiang 
702*8433398cSSonny Jiang 	/* enable VCPU clock */
703*8433398cSSonny Jiang 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
704*8433398cSSonny Jiang 		 UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
705*8433398cSSonny Jiang 
706*8433398cSSonny Jiang 	/* disable master interrupt */
707*8433398cSSonny Jiang 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
708*8433398cSSonny Jiang 		 ~UVD_MASTINT_EN__VCPU_EN_MASK);
709*8433398cSSonny Jiang 
710*8433398cSSonny Jiang 	/* enable LMI MC and UMC channels */
711*8433398cSSonny Jiang 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
712*8433398cSSonny Jiang 		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
713*8433398cSSonny Jiang 
714*8433398cSSonny Jiang 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
715*8433398cSSonny Jiang 	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
716*8433398cSSonny Jiang 	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
717*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
718*8433398cSSonny Jiang 
719*8433398cSSonny Jiang 	/* setup regUVD_LMI_CTRL */
720*8433398cSSonny Jiang 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
721*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL, tmp |
722*8433398cSSonny Jiang 		     UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
723*8433398cSSonny Jiang 		     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
724*8433398cSSonny Jiang 		     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
725*8433398cSSonny Jiang 		     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
726*8433398cSSonny Jiang 
727*8433398cSSonny Jiang 	vcn_v5_0_2_mc_resume(vinst);
728*8433398cSSonny Jiang 
729*8433398cSSonny Jiang 	/* VCN global tiling registers */
730*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
731*8433398cSSonny Jiang 		     adev->gfx.config.gb_addr_config);
732*8433398cSSonny Jiang 
733*8433398cSSonny Jiang 	/* unblock VCPU register access */
734*8433398cSSonny Jiang 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
735*8433398cSSonny Jiang 		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
736*8433398cSSonny Jiang 
737*8433398cSSonny Jiang 	/* release VCPU reset to boot */
738*8433398cSSonny Jiang 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
739*8433398cSSonny Jiang 		 ~UVD_VCPU_CNTL__BLK_RST_MASK);
740*8433398cSSonny Jiang 
741*8433398cSSonny Jiang 	for (j = 0; j < 10; ++j) {
742*8433398cSSonny Jiang 		uint32_t status;
743*8433398cSSonny Jiang 
744*8433398cSSonny Jiang 		for (k = 0; k < 100; ++k) {
745*8433398cSSonny Jiang 			status = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
746*8433398cSSonny Jiang 			if (status & 2)
747*8433398cSSonny Jiang 				break;
748*8433398cSSonny Jiang 			mdelay(1000);
749*8433398cSSonny Jiang 			if (amdgpu_emu_mode == 1)
750*8433398cSSonny Jiang 				msleep(520);
751*8433398cSSonny Jiang 		}
752*8433398cSSonny Jiang 
753*8433398cSSonny Jiang 		if (amdgpu_emu_mode == 1) {
754*8433398cSSonny Jiang 			r = -1;
755*8433398cSSonny Jiang 			if (status & 2) {
756*8433398cSSonny Jiang 				r = 0;
757*8433398cSSonny Jiang 				break;
758*8433398cSSonny Jiang 			}
759*8433398cSSonny Jiang 		} else {
760*8433398cSSonny Jiang 			r = 0;
761*8433398cSSonny Jiang 			if (status & 2)
762*8433398cSSonny Jiang 				break;
763*8433398cSSonny Jiang 
764*8433398cSSonny Jiang 			dev_err(adev->dev,
765*8433398cSSonny Jiang 				"VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
766*8433398cSSonny Jiang 			WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
767*8433398cSSonny Jiang 				 UVD_VCPU_CNTL__BLK_RST_MASK,
768*8433398cSSonny Jiang 				 ~UVD_VCPU_CNTL__BLK_RST_MASK);
769*8433398cSSonny Jiang 			mdelay(10);
770*8433398cSSonny Jiang 			WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
771*8433398cSSonny Jiang 				 ~UVD_VCPU_CNTL__BLK_RST_MASK);
772*8433398cSSonny Jiang 
773*8433398cSSonny Jiang 			mdelay(10);
774*8433398cSSonny Jiang 			r = -1;
775*8433398cSSonny Jiang 		}
776*8433398cSSonny Jiang 	}
777*8433398cSSonny Jiang 
778*8433398cSSonny Jiang 	if (r) {
779*8433398cSSonny Jiang 		dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
780*8433398cSSonny Jiang 		return r;
781*8433398cSSonny Jiang 	}
782*8433398cSSonny Jiang 
783*8433398cSSonny Jiang 	/* enable master interrupt */
784*8433398cSSonny Jiang 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
785*8433398cSSonny Jiang 		 UVD_MASTINT_EN__VCPU_EN_MASK,
786*8433398cSSonny Jiang 		 ~UVD_MASTINT_EN__VCPU_EN_MASK);
787*8433398cSSonny Jiang 
788*8433398cSSonny Jiang 	/* clear the busy bit of VCN_STATUS */
789*8433398cSSonny Jiang 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
790*8433398cSSonny Jiang 		 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
791*8433398cSSonny Jiang 
792*8433398cSSonny Jiang 	ring = &adev->vcn.inst[i].ring_enc[0];
793*8433398cSSonny Jiang 
794*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
795*8433398cSSonny Jiang 		     ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
796*8433398cSSonny Jiang 		     VCN_RB1_DB_CTRL__EN_MASK);
797*8433398cSSonny Jiang 
798*8433398cSSonny Jiang 	/* Read DB_CTRL to flush the write DB_CTRL command. */
799*8433398cSSonny Jiang 	RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
800*8433398cSSonny Jiang 
801*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, ring->gpu_addr);
802*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
803*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / 4);
804*8433398cSSonny Jiang 
805*8433398cSSonny Jiang 	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
806*8433398cSSonny Jiang 	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
807*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
808*8433398cSSonny Jiang 	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
809*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
810*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
811*8433398cSSonny Jiang 
812*8433398cSSonny Jiang 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
813*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
814*8433398cSSonny Jiang 	ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
815*8433398cSSonny Jiang 
816*8433398cSSonny Jiang 	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
817*8433398cSSonny Jiang 	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
818*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
819*8433398cSSonny Jiang 	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
820*8433398cSSonny Jiang 
821*8433398cSSonny Jiang 	return 0;
822*8433398cSSonny Jiang }
823*8433398cSSonny Jiang 
824*8433398cSSonny Jiang /**
825*8433398cSSonny Jiang  * vcn_v5_0_2_stop_dpg_mode - VCN stop with dpg mode
826*8433398cSSonny Jiang  *
827*8433398cSSonny Jiang  * @vinst: VCN instance
828*8433398cSSonny Jiang  *
829*8433398cSSonny Jiang  * Stop VCN block with dpg mode
830*8433398cSSonny Jiang  */
831*8433398cSSonny Jiang static void vcn_v5_0_2_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
832*8433398cSSonny Jiang {
833*8433398cSSonny Jiang 	struct amdgpu_device *adev = vinst->adev;
834*8433398cSSonny Jiang 	int inst_idx = vinst->inst;
835*8433398cSSonny Jiang 	uint32_t tmp;
836*8433398cSSonny Jiang 	int vcn_inst;
837*8433398cSSonny Jiang 	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
838*8433398cSSonny Jiang 
839*8433398cSSonny Jiang 	vcn_inst = GET_INST(VCN, inst_idx);
840*8433398cSSonny Jiang 
841*8433398cSSonny Jiang 	/* Unpause dpg */
842*8433398cSSonny Jiang 	vcn_v5_0_2_pause_dpg_mode(vinst, &state);
843*8433398cSSonny Jiang 
844*8433398cSSonny Jiang 	/* Wait for power status to be 1 */
845*8433398cSSonny Jiang 	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
846*8433398cSSonny Jiang 		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
847*8433398cSSonny Jiang 
848*8433398cSSonny Jiang 	/* wait for read ptr to be equal to write ptr */
849*8433398cSSonny Jiang 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
850*8433398cSSonny Jiang 	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);
851*8433398cSSonny Jiang 
852*8433398cSSonny Jiang 	/* disable dynamic power gating mode */
853*8433398cSSonny Jiang 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
854*8433398cSSonny Jiang 		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
855*8433398cSSonny Jiang }
856*8433398cSSonny Jiang 
857*8433398cSSonny Jiang /**
858*8433398cSSonny Jiang  * vcn_v5_0_2_stop - VCN stop
859*8433398cSSonny Jiang  *
860*8433398cSSonny Jiang  * @vinst: VCN instance
861*8433398cSSonny Jiang  *
862*8433398cSSonny Jiang  * Stop VCN block
863*8433398cSSonny Jiang  */
864*8433398cSSonny Jiang static int vcn_v5_0_2_stop(struct amdgpu_vcn_inst *vinst)
865*8433398cSSonny Jiang {
866*8433398cSSonny Jiang 	struct amdgpu_device *adev = vinst->adev;
867*8433398cSSonny Jiang 	int i = vinst->inst;
868*8433398cSSonny Jiang 	struct amdgpu_vcn5_fw_shared *fw_shared;
869*8433398cSSonny Jiang 	uint32_t tmp;
870*8433398cSSonny Jiang 	int r = 0, vcn_inst;
871*8433398cSSonny Jiang 
872*8433398cSSonny Jiang 	vcn_inst = GET_INST(VCN, i);
873*8433398cSSonny Jiang 
874*8433398cSSonny Jiang 	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
875*8433398cSSonny Jiang 	fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
876*8433398cSSonny Jiang 
877*8433398cSSonny Jiang 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
878*8433398cSSonny Jiang 		vcn_v5_0_2_stop_dpg_mode(vinst);
879*8433398cSSonny Jiang 		return 0;
880*8433398cSSonny Jiang 	}
881*8433398cSSonny Jiang 
882*8433398cSSonny Jiang 	/* wait for vcn idle */
883*8433398cSSonny Jiang 	r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
884*8433398cSSonny Jiang 	if (r)
885*8433398cSSonny Jiang 		return r;
886*8433398cSSonny Jiang 
887*8433398cSSonny Jiang 	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
888*8433398cSSonny Jiang 		UVD_LMI_STATUS__READ_CLEAN_MASK |
889*8433398cSSonny Jiang 		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
890*8433398cSSonny Jiang 		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
891*8433398cSSonny Jiang 	r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
892*8433398cSSonny Jiang 	if (r)
893*8433398cSSonny Jiang 		return r;
894*8433398cSSonny Jiang 
895*8433398cSSonny Jiang 	/* disable LMI UMC channel */
896*8433398cSSonny Jiang 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
897*8433398cSSonny Jiang 	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
898*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
899*8433398cSSonny Jiang 	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
900*8433398cSSonny Jiang 		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
901*8433398cSSonny Jiang 	r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
902*8433398cSSonny Jiang 	if (r)
903*8433398cSSonny Jiang 		return r;
904*8433398cSSonny Jiang 
905*8433398cSSonny Jiang 	/* block VCPU register access */
906*8433398cSSonny Jiang 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
907*8433398cSSonny Jiang 		 UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
908*8433398cSSonny Jiang 		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
909*8433398cSSonny Jiang 
910*8433398cSSonny Jiang 	/* reset VCPU */
911*8433398cSSonny Jiang 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
912*8433398cSSonny Jiang 		 UVD_VCPU_CNTL__BLK_RST_MASK,
913*8433398cSSonny Jiang 		 ~UVD_VCPU_CNTL__BLK_RST_MASK);
914*8433398cSSonny Jiang 
915*8433398cSSonny Jiang 	/* disable VCPU clock */
916*8433398cSSonny Jiang 	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
917*8433398cSSonny Jiang 		 ~(UVD_VCPU_CNTL__CLK_EN_MASK));
918*8433398cSSonny Jiang 
919*8433398cSSonny Jiang 	/* apply soft reset */
920*8433398cSSonny Jiang 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
921*8433398cSSonny Jiang 	tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
922*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
923*8433398cSSonny Jiang 	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
924*8433398cSSonny Jiang 	tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
925*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
926*8433398cSSonny Jiang 
927*8433398cSSonny Jiang 	/* clear status */
928*8433398cSSonny Jiang 	WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
929*8433398cSSonny Jiang 
930*8433398cSSonny Jiang 	return 0;
931*8433398cSSonny Jiang }
932*8433398cSSonny Jiang 
933*8433398cSSonny Jiang /**
934*8433398cSSonny Jiang  * vcn_v5_0_2_unified_ring_get_rptr - get unified read pointer
935*8433398cSSonny Jiang  *
936*8433398cSSonny Jiang  * @ring: amdgpu_ring pointer
937*8433398cSSonny Jiang  *
938*8433398cSSonny Jiang  * Returns the current hardware unified read pointer
939*8433398cSSonny Jiang  */
940*8433398cSSonny Jiang static uint64_t vcn_v5_0_2_unified_ring_get_rptr(struct amdgpu_ring *ring)
941*8433398cSSonny Jiang {
942*8433398cSSonny Jiang 	struct amdgpu_device *adev = ring->adev;
943*8433398cSSonny Jiang 
944*8433398cSSonny Jiang 	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
945*8433398cSSonny Jiang 		DRM_ERROR("wrong ring id is identified in %s", __func__);
946*8433398cSSonny Jiang 
947*8433398cSSonny Jiang 	return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR);
948*8433398cSSonny Jiang }
949*8433398cSSonny Jiang 
950*8433398cSSonny Jiang /**
951*8433398cSSonny Jiang  * vcn_v5_0_2_unified_ring_get_wptr - get unified write pointer
952*8433398cSSonny Jiang  *
953*8433398cSSonny Jiang  * @ring: amdgpu_ring pointer
954*8433398cSSonny Jiang  *
955*8433398cSSonny Jiang  * Returns the current hardware unified write pointer
956*8433398cSSonny Jiang  */
957*8433398cSSonny Jiang static uint64_t vcn_v5_0_2_unified_ring_get_wptr(struct amdgpu_ring *ring)
958*8433398cSSonny Jiang {
959*8433398cSSonny Jiang 	struct amdgpu_device *adev = ring->adev;
960*8433398cSSonny Jiang 
961*8433398cSSonny Jiang 	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
962*8433398cSSonny Jiang 		DRM_ERROR("wrong ring id is identified in %s", __func__);
963*8433398cSSonny Jiang 
964*8433398cSSonny Jiang 	if (ring->use_doorbell)
965*8433398cSSonny Jiang 		return *ring->wptr_cpu_addr;
966*8433398cSSonny Jiang 	else
967*8433398cSSonny Jiang 		return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR);
968*8433398cSSonny Jiang }
969*8433398cSSonny Jiang 
970*8433398cSSonny Jiang /**
971*8433398cSSonny Jiang  * vcn_v5_0_2_unified_ring_set_wptr - set enc write pointer
972*8433398cSSonny Jiang  *
973*8433398cSSonny Jiang  * @ring: amdgpu_ring pointer
974*8433398cSSonny Jiang  *
975*8433398cSSonny Jiang  * Commits the enc write pointer to the hardware
976*8433398cSSonny Jiang  */
977*8433398cSSonny Jiang static void vcn_v5_0_2_unified_ring_set_wptr(struct amdgpu_ring *ring)
978*8433398cSSonny Jiang {
979*8433398cSSonny Jiang 	struct amdgpu_device *adev = ring->adev;
980*8433398cSSonny Jiang 
981*8433398cSSonny Jiang 	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
982*8433398cSSonny Jiang 		DRM_ERROR("wrong ring id is identified in %s", __func__);
983*8433398cSSonny Jiang 
984*8433398cSSonny Jiang 	if (ring->use_doorbell) {
985*8433398cSSonny Jiang 		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
986*8433398cSSonny Jiang 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
987*8433398cSSonny Jiang 	} else {
988*8433398cSSonny Jiang 		WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR,
989*8433398cSSonny Jiang 				lower_32_bits(ring->wptr));
990*8433398cSSonny Jiang 	}
991*8433398cSSonny Jiang }
992*8433398cSSonny Jiang 
993*8433398cSSonny Jiang static const struct amdgpu_ring_funcs vcn_v5_0_2_unified_ring_vm_funcs = {
994*8433398cSSonny Jiang 	.type = AMDGPU_RING_TYPE_VCN_ENC,
995*8433398cSSonny Jiang 	.align_mask = 0x3f,
996*8433398cSSonny Jiang 	.nop = VCN_ENC_CMD_NO_OP,
997*8433398cSSonny Jiang 	.get_rptr = vcn_v5_0_2_unified_ring_get_rptr,
998*8433398cSSonny Jiang 	.get_wptr = vcn_v5_0_2_unified_ring_get_wptr,
999*8433398cSSonny Jiang 	.set_wptr = vcn_v5_0_2_unified_ring_set_wptr,
1000*8433398cSSonny Jiang 	.emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1001*8433398cSSonny Jiang 			   SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1002*8433398cSSonny Jiang 			   4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
1003*8433398cSSonny Jiang 			   5 +
1004*8433398cSSonny Jiang 			   5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
1005*8433398cSSonny Jiang 			   1, /* vcn_v2_0_enc_ring_insert_end */
1006*8433398cSSonny Jiang 	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
1007*8433398cSSonny Jiang 	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
1008*8433398cSSonny Jiang 	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
1009*8433398cSSonny Jiang 	.emit_vm_flush = vcn_v4_0_3_enc_ring_emit_vm_flush,
1010*8433398cSSonny Jiang 	.emit_hdp_flush = vcn_v4_0_3_ring_emit_hdp_flush,
1011*8433398cSSonny Jiang 	.test_ring = amdgpu_vcn_enc_ring_test_ring,
1012*8433398cSSonny Jiang 	.test_ib = amdgpu_vcn_unified_ring_test_ib,
1013*8433398cSSonny Jiang 	.insert_nop = amdgpu_ring_insert_nop,
1014*8433398cSSonny Jiang 	.insert_end = vcn_v2_0_enc_ring_insert_end,
1015*8433398cSSonny Jiang 	.pad_ib = amdgpu_ring_generic_pad_ib,
1016*8433398cSSonny Jiang 	.begin_use = amdgpu_vcn_ring_begin_use,
1017*8433398cSSonny Jiang 	.end_use = amdgpu_vcn_ring_end_use,
1018*8433398cSSonny Jiang 	.emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
1019*8433398cSSonny Jiang 	.emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
1020*8433398cSSonny Jiang 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1021*8433398cSSonny Jiang };
1022*8433398cSSonny Jiang 
1023*8433398cSSonny Jiang /**
1024*8433398cSSonny Jiang  * vcn_v5_0_2_set_unified_ring_funcs - set unified ring functions
1025*8433398cSSonny Jiang  *
1026*8433398cSSonny Jiang  * @adev: amdgpu_device pointer
1027*8433398cSSonny Jiang  *
1028*8433398cSSonny Jiang  * Set unified ring functions
1029*8433398cSSonny Jiang  */
1030*8433398cSSonny Jiang static void vcn_v5_0_2_set_unified_ring_funcs(struct amdgpu_device *adev)
1031*8433398cSSonny Jiang {
1032*8433398cSSonny Jiang 	int i, vcn_inst;
1033*8433398cSSonny Jiang 
1034*8433398cSSonny Jiang 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1035*8433398cSSonny Jiang 		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_2_unified_ring_vm_funcs;
1036*8433398cSSonny Jiang 		adev->vcn.inst[i].ring_enc[0].me = i;
1037*8433398cSSonny Jiang 		vcn_inst = GET_INST(VCN, i);
1038*8433398cSSonny Jiang 		adev->vcn.inst[i].aid_id = vcn_inst / adev->vcn.num_inst_per_aid;
1039*8433398cSSonny Jiang 	}
1040*8433398cSSonny Jiang }
1041*8433398cSSonny Jiang 
1042*8433398cSSonny Jiang /**
1043*8433398cSSonny Jiang  * vcn_v5_0_2_is_idle - check VCN block is idle
1044*8433398cSSonny Jiang  *
1045*8433398cSSonny Jiang  * @ip_block: Pointer to the amdgpu_ip_block structure
1046*8433398cSSonny Jiang  *
1047*8433398cSSonny Jiang  * Check whether VCN block is idle
1048*8433398cSSonny Jiang  */
1049*8433398cSSonny Jiang static bool vcn_v5_0_2_is_idle(struct amdgpu_ip_block *ip_block)
1050*8433398cSSonny Jiang {
1051*8433398cSSonny Jiang 	struct amdgpu_device *adev = ip_block->adev;
1052*8433398cSSonny Jiang 	int i, ret = 1;
1053*8433398cSSonny Jiang 
1054*8433398cSSonny Jiang 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
1055*8433398cSSonny Jiang 		ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) == UVD_STATUS__IDLE);
1056*8433398cSSonny Jiang 
1057*8433398cSSonny Jiang 	return ret;
1058*8433398cSSonny Jiang }
1059*8433398cSSonny Jiang 
1060*8433398cSSonny Jiang /**
1061*8433398cSSonny Jiang  * vcn_v5_0_2_wait_for_idle - wait for VCN block idle
1062*8433398cSSonny Jiang  *
1063*8433398cSSonny Jiang  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
1064*8433398cSSonny Jiang  *
1065*8433398cSSonny Jiang  * Wait for VCN block idle
1066*8433398cSSonny Jiang  */
1067*8433398cSSonny Jiang static int vcn_v5_0_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
1068*8433398cSSonny Jiang {
1069*8433398cSSonny Jiang 	struct amdgpu_device *adev = ip_block->adev;
1070*8433398cSSonny Jiang 	int i, ret = 0;
1071*8433398cSSonny Jiang 
1072*8433398cSSonny Jiang 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1073*8433398cSSonny Jiang 		ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS, UVD_STATUS__IDLE,
1074*8433398cSSonny Jiang 			UVD_STATUS__IDLE);
1075*8433398cSSonny Jiang 		if (ret)
1076*8433398cSSonny Jiang 			return ret;
1077*8433398cSSonny Jiang 	}
1078*8433398cSSonny Jiang 
1079*8433398cSSonny Jiang 	return ret;
1080*8433398cSSonny Jiang }
1081*8433398cSSonny Jiang 
1082*8433398cSSonny Jiang /**
1083*8433398cSSonny Jiang  * vcn_v5_0_2_set_clockgating_state - set VCN block clockgating state
1084*8433398cSSonny Jiang  *
1085*8433398cSSonny Jiang  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
1086*8433398cSSonny Jiang  * @state: clock gating state
1087*8433398cSSonny Jiang  *
1088*8433398cSSonny Jiang  * Set VCN block clockgating state
1089*8433398cSSonny Jiang  */
1090*8433398cSSonny Jiang static int vcn_v5_0_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
1091*8433398cSSonny Jiang 					    enum amd_clockgating_state state)
1092*8433398cSSonny Jiang {
1093*8433398cSSonny Jiang 	struct amdgpu_device *adev = ip_block->adev;
1094*8433398cSSonny Jiang 	bool enable = state == AMD_CG_STATE_GATE;
1095*8433398cSSonny Jiang 	int i;
1096*8433398cSSonny Jiang 
1097*8433398cSSonny Jiang 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1098*8433398cSSonny Jiang 		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
1099*8433398cSSonny Jiang 
1100*8433398cSSonny Jiang 		if (enable) {
1101*8433398cSSonny Jiang 			if (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) != UVD_STATUS__IDLE)
1102*8433398cSSonny Jiang 				return -EBUSY;
1103*8433398cSSonny Jiang 			vcn_v5_0_2_enable_clock_gating(vinst);
1104*8433398cSSonny Jiang 		} else {
1105*8433398cSSonny Jiang 			vcn_v5_0_2_disable_clock_gating(vinst);
1106*8433398cSSonny Jiang 		}
1107*8433398cSSonny Jiang 	}
1108*8433398cSSonny Jiang 
1109*8433398cSSonny Jiang 	return 0;
1110*8433398cSSonny Jiang }
1111*8433398cSSonny Jiang 
1112*8433398cSSonny Jiang static int vcn_v5_0_2_set_pg_state(struct amdgpu_vcn_inst *vinst,
1113*8433398cSSonny Jiang 				   enum amd_powergating_state state)
1114*8433398cSSonny Jiang {
1115*8433398cSSonny Jiang 	int ret = 0;
1116*8433398cSSonny Jiang 
1117*8433398cSSonny Jiang 	if (state == vinst->cur_state)
1118*8433398cSSonny Jiang 		return 0;
1119*8433398cSSonny Jiang 
1120*8433398cSSonny Jiang 	if (state == AMD_PG_STATE_GATE)
1121*8433398cSSonny Jiang 		ret = vcn_v5_0_2_stop(vinst);
1122*8433398cSSonny Jiang 	else
1123*8433398cSSonny Jiang 		ret = vcn_v5_0_2_start(vinst);
1124*8433398cSSonny Jiang 
1125*8433398cSSonny Jiang 	if (!ret)
1126*8433398cSSonny Jiang 		vinst->cur_state = state;
1127*8433398cSSonny Jiang 
1128*8433398cSSonny Jiang 	return ret;
1129*8433398cSSonny Jiang }
1130*8433398cSSonny Jiang 
1131*8433398cSSonny Jiang /**
1132*8433398cSSonny Jiang  * vcn_v5_0_2_process_interrupt - process VCN block interrupt
1133*8433398cSSonny Jiang  *
1134*8433398cSSonny Jiang  * @adev: amdgpu_device pointer
1135*8433398cSSonny Jiang  * @source: interrupt sources
1136*8433398cSSonny Jiang  * @entry: interrupt entry from clients and sources
1137*8433398cSSonny Jiang  *
1138*8433398cSSonny Jiang  * Process VCN block interrupt
1139*8433398cSSonny Jiang  */
1140*8433398cSSonny Jiang static int vcn_v5_0_2_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
1141*8433398cSSonny Jiang 	struct amdgpu_iv_entry *entry)
1142*8433398cSSonny Jiang {
1143*8433398cSSonny Jiang 	uint32_t i, inst;
1144*8433398cSSonny Jiang 
1145*8433398cSSonny Jiang 	i = node_id_to_phys_map[entry->node_id];
1146*8433398cSSonny Jiang 
1147*8433398cSSonny Jiang 	DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n");
1148*8433398cSSonny Jiang 
1149*8433398cSSonny Jiang 	for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst)
1150*8433398cSSonny Jiang 		if (adev->vcn.inst[inst].aid_id == i)
1151*8433398cSSonny Jiang 			break;
1152*8433398cSSonny Jiang 
1153*8433398cSSonny Jiang 	if (inst >= adev->vcn.num_vcn_inst) {
1154*8433398cSSonny Jiang 		dev_WARN_ONCE(adev->dev, 1,
1155*8433398cSSonny Jiang 				"Interrupt received for unknown VCN instance %d",
1156*8433398cSSonny Jiang 				entry->node_id);
1157*8433398cSSonny Jiang 		return 0;
1158*8433398cSSonny Jiang 	}
1159*8433398cSSonny Jiang 
1160*8433398cSSonny Jiang 	switch (entry->src_id) {
1161*8433398cSSonny Jiang 	case VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
1162*8433398cSSonny Jiang 		amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]);
1163*8433398cSSonny Jiang 		break;
1164*8433398cSSonny Jiang 	default:
1165*8433398cSSonny Jiang 		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
1166*8433398cSSonny Jiang 			  entry->src_id, entry->src_data[0]);
1167*8433398cSSonny Jiang 		break;
1168*8433398cSSonny Jiang 	}
1169*8433398cSSonny Jiang 
1170*8433398cSSonny Jiang 	return 0;
1171*8433398cSSonny Jiang }
1172*8433398cSSonny Jiang 
/* Interrupt source callbacks for VCN 5.0.2; only .process is implemented. */
static const struct amdgpu_irq_src_funcs vcn_v5_0_2_irq_funcs = {
	.process = vcn_v5_0_2_process_interrupt,
};
1176*8433398cSSonny Jiang 
1177*8433398cSSonny Jiang /**
1178*8433398cSSonny Jiang  * vcn_v5_0_2_set_irq_funcs - set VCN block interrupt irq functions
1179*8433398cSSonny Jiang  *
1180*8433398cSSonny Jiang  * @adev: amdgpu_device pointer
1181*8433398cSSonny Jiang  *
1182*8433398cSSonny Jiang  * Set VCN block interrupt irq functions
1183*8433398cSSonny Jiang  */
1184*8433398cSSonny Jiang static void vcn_v5_0_2_set_irq_funcs(struct amdgpu_device *adev)
1185*8433398cSSonny Jiang {
1186*8433398cSSonny Jiang 	int i;
1187*8433398cSSonny Jiang 
1188*8433398cSSonny Jiang 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
1189*8433398cSSonny Jiang 		adev->vcn.inst->irq.num_types++;
1190*8433398cSSonny Jiang 	adev->vcn.inst->irq.funcs = &vcn_v5_0_2_irq_funcs;
1191*8433398cSSonny Jiang }
1192*8433398cSSonny Jiang 
1193*8433398cSSonny Jiang static const struct amd_ip_funcs vcn_v5_0_2_ip_funcs = {
1194*8433398cSSonny Jiang 	.name = "vcn_v5_0_2",
1195*8433398cSSonny Jiang 	.early_init = vcn_v5_0_2_early_init,
1196*8433398cSSonny Jiang 	.late_init = NULL,
1197*8433398cSSonny Jiang 	.sw_init = vcn_v5_0_2_sw_init,
1198*8433398cSSonny Jiang 	.sw_fini = vcn_v5_0_2_sw_fini,
1199*8433398cSSonny Jiang 	.hw_init = vcn_v5_0_2_hw_init,
1200*8433398cSSonny Jiang 	.hw_fini = vcn_v5_0_2_hw_fini,
1201*8433398cSSonny Jiang 	.suspend = vcn_v5_0_2_suspend,
1202*8433398cSSonny Jiang 	.resume = vcn_v5_0_2_resume,
1203*8433398cSSonny Jiang 	.is_idle = vcn_v5_0_2_is_idle,
1204*8433398cSSonny Jiang 	.wait_for_idle = vcn_v5_0_2_wait_for_idle,
1205*8433398cSSonny Jiang 	.check_soft_reset = NULL,
1206*8433398cSSonny Jiang 	.pre_soft_reset = NULL,
1207*8433398cSSonny Jiang 	.soft_reset = NULL,
1208*8433398cSSonny Jiang 	.post_soft_reset = NULL,
1209*8433398cSSonny Jiang 	.set_clockgating_state = vcn_v5_0_2_set_clockgating_state,
1210*8433398cSSonny Jiang 	.set_powergating_state = vcn_set_powergating_state,
1211*8433398cSSonny Jiang };
1212*8433398cSSonny Jiang 
/*
 * Version descriptor binding the VCN IP type at version 5.0.2 to this
 * file's amd_ip_funcs table; exported (non-static) for registration by
 * code outside this file.
 */
const struct amdgpu_ip_block_version vcn_v5_0_2_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 5,
	.minor = 0,
	.rev = 2,
	.funcs = &vcn_v5_0_2_ip_funcs,
};
1220