/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "cikd.h"

#include "uvd/uvd_4_2_d.h"
#include "uvd/uvd_4_2_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "bif/bif_4_1_d.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
static int uvd_v4_2_set_clockgating_state(void *handle,
				enum amd_clockgating_state state);
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode);
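
/*
 * UVD 4.2 is the video decode block used on CIK-generation ASICs (note
 * the cikd.h include above).  This file supplies the IP-block lifecycle
 * hooks, the decode ring helpers (pointer accessors, fence/IB packet
 * emission), and the UVD trap interrupt handling, all tied together by
 * the uvd_v4_2_ip_block definition at the end of the file.
 */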
/**
 * uvd_v4_2_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v4_2_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v4_2_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

static int uvd_v4_2_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->uvd.num_uvd_inst = 1;

	uvd_v4_2_set_ring_funcs(adev);
	uvd_v4_2_set_irq_funcs(adev);

	return 0;
}

static int uvd_v4_2_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return r;
}

static int uvd_v4_2_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v4_2_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v4_2_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int r;

	uvd_v4_2_enable_mgcg(adev, true);
	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

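	/*
	 * The writes below are type-0 (PACKET0) packets: PACKET0(reg, n)
	 * is followed by n + 1 dwords stored to consecutive registers
	 * starting at reg.  Every packet here carries a single dword, so
	 * one register write costs two ring entries; the final pair, for
	 * example, stores the value 3 to mmUVD_SEMA_CNTL.  The 0xFFFFF
	 * values appear to be maximum semaphore timeouts; the unit is not
	 * documented here.
	 */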
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v4_2_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v4_2_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v4_2_stop(adev);

	return 0;
}

static int uvd_v4_2_prepare_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return amdgpu_uvd_prepare_suspend(adev);
}

static int uvd_v4_2_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 *   - cancel the delayed idle work
	 *   - enable powergating
	 *   - enable clockgating
	 *   - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		/* shutdown the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
	}

	r = uvd_v4_2_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v4_2_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v4_2_hw_init(adev);
}

/**
 * uvd_v4_2_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v4_2_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz;
	int i, j, r;
	u32 tmp;
	/* disable byte swapping */
	u32 lmi_swap_cntl = 0;
	u32 mp_swap_cntl = 0;

	/* set uvd busy */
	WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2));

	uvd_v4_2_set_dcm(adev, true);
	WREG32(mmUVD_CGC_GATE, 0);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL,  1 << 9);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x203108);

	tmp = RREG32(mmUVD_MPC_CNTL);
	WREG32(mmUVD_MPC_CNTL, tmp | 0x10);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	uvd_v4_2_mc_resume(adev);

	tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
	WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* take LMI, UMC and VCPU out of soft reset */
	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	mdelay(10);

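	/*
	 * VCPU boot handshake: poll UVD_STATUS bit 1 for up to 100 * 10 ms.
	 * If it never sets, pulse VCPU_SOFT_RESET and retry, up to 10
	 * attempts in total, before giving up.
	 */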
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));

	/* clear the busy bit set at the top of uvd_v4_2_start() */
	WREG32_P(mmUVD_STATUS, 0, ~(1<<2));

	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* program the 4GB memory segment for rptr and ring buffer */
	WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
				   (0x7 << 16) | (0x1 << 31));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0x0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	/* set the ring address */
	WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);

	/* Set ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size);
	rb_bufsz = (0x1 << 8) | rb_bufsz;
	WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

	return 0;
}

/**
 * uvd_v4_2_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v4_2_stop(struct amdgpu_device *adev)
{
	uint32_t i, j;
	uint32_t status;

	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(1);
		}
		if (status & 2)
			break;
	}

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0xf)
				break;
			mdelay(1);
		}
		if (status & 0xf)
			break;
	}

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0x240)
				break;
			mdelay(1);
		}
		if (status & 0x240)
			break;
	}

	WREG32_P(0x3D49, 0, ~(1 << 2));

	WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32(mmUVD_STATUS, 0);

	uvd_v4_2_set_dcm(adev, false);
}

/**
 * uvd_v4_2_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

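	/*
	 * Seven register/data pairs follow (14 dwords total, which is what
	 * .emit_frame_size accounts for below): the 32-bit sequence number
	 * goes to UVD_CONTEXT_ID, the fence address is split across
	 * GPCOM_VCPU_DATA0/1 (only 40 address bits, hence the 0xff mask on
	 * the upper half), and GPCOM_VCPU_CMD selects the operation,
	 * apparently 0 for the fence write and 2 for the trap, matching
	 * the kernel-doc above.
	 */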
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v4_2_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

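	/*
	 * mmUVD_CONTEXT_ID doubles as a scratch register here: seed it
	 * with 0xCAFEDEAD over MMIO, emit a ring write of 0xDEADBEEF to
	 * the same register, then poll until the new value appears.
	 * Seeing the update proves the engine fetches and executes
	 * packets.
	 */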
	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v4_2_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job associated with the indirect buffer
 * @ib: indirect buffer to execute
 * @flags: flags associated with the indirect buffer
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
	amdgpu_ring_write(ring, ib->gpu_addr);
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
554a2e73f56SAlex Deucher 
uvd_v4_2_ring_insert_nop(struct amdgpu_ring * ring,uint32_t count)555def13903SLeo Liu static void uvd_v4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
556def13903SLeo Liu {
557def13903SLeo Liu 	int i;
558def13903SLeo Liu 
559def13903SLeo Liu 	WARN_ON(ring->wptr % 2 || count % 2);
560def13903SLeo Liu 
561def13903SLeo Liu 	for (i = 0; i < count / 2; i++) {
562def13903SLeo Liu 		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
563def13903SLeo Liu 		amdgpu_ring_write(ring, 0);
564def13903SLeo Liu 	}
565def13903SLeo Liu }
566def13903SLeo Liu 
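/*
 * Note on padding: each NOP above is a PACKET0 header plus one data
 * dword, i.e. two ring entries, which is why insert_nop warns on odd
 * wptr or count values and always pads in dword pairs.
 */
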
/**
 * uvd_v4_2_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
{
	uint64_t addr;
	uint32_t size;

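	/*
	 * The VCPU cache windows are programmed in 8-byte units (hence the
	 * ">> 3") and carved consecutively out of the UVD buffer object:
	 * firmware first, then the heap, then the stack plus one session
	 * slot per handle.  Address bits 28-31 and 32-39 are supplied
	 * separately through the LMI extension registers at the end.
	 */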
	/* program the VCPU memory controller bits 0-27 */
	addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = AMDGPU_UVD_HEAP_SIZE >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = (AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	/* bits 28-31 */
	addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
	WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

	/* bits 32-39 */
	addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

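/*
 * Dynamic clock mode selection.  A rough reading of the register writes:
 * register clock gating is dropped via UVD_CGC_GATE.REGS, dynamic mode
 * plus gate/off delays are set in UVD_CGC_CTRL, and then sw_mode either
 * clears the 0x7ffff800 gating bits and enables the oclk/rclk ramps with
 * a divider in UVD_CGC_CTRL2, or forces those gating bits on with
 * UVD_CGC_CTRL2 cleared.
 */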
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode)
{
	u32 tmp, tmp2;

	WREG32_FIELD(UVD_CGC_GATE, REGS, 0);

	tmp = RREG32(mmUVD_CGC_CTRL);
	tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
	tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
		(4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);

	if (sw_mode) {
		tmp &= ~0x7ffff800;
		tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
			UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
			(7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
	} else {
		tmp |= 0x7ffff800;
		tmp2 = 0;
	}

	WREG32(mmUVD_CGC_CTRL, tmp);
	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}

static bool uvd_v4_2_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v4_2_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v4_2_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v4_2_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
			~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v4_2_start(adev);
}

static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO */
	return 0;
}

static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.inst->ring);
	return 0;
}

static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int uvd_v4_2_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v4_2_stop(adev);
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
			if (!(RREG32_SMC(ixCURRENT_PG_STATUS) &
				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) {
				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK   |
							UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK |
							UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
				mdelay(20);
			}
		}
		return 0;
	} else {
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
			if (RREG32_SMC(ixCURRENT_PG_STATUS) &
				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK   |
						UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK |
						UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
				mdelay(30);
			}
		}
		return uvd_v4_2_start(adev);
	}
}

static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
	.name = "uvd_v4_2",
	.early_init = uvd_v4_2_early_init,
	.late_init = NULL,
	.sw_init = uvd_v4_2_sw_init,
	.sw_fini = uvd_v4_2_sw_fini,
	.hw_init = uvd_v4_2_hw_init,
	.hw_fini = uvd_v4_2_hw_fini,
	.prepare_suspend = uvd_v4_2_prepare_suspend,
	.suspend = uvd_v4_2_suspend,
	.resume = uvd_v4_2_resume,
	.is_idle = uvd_v4_2_is_idle,
	.wait_for_idle = uvd_v4_2_wait_for_idle,
	.soft_reset = uvd_v4_2_soft_reset,
	.set_clockgating_state = uvd_v4_2_set_clockgating_state,
	.set_powergating_state = uvd_v4_2_set_powergating_state,
	.dump_ip_state = NULL,
	.print_ip_state = NULL,
};

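/*
 * Sizing cross-check against the emit helpers above: emit_fence writes
 * seven PACKET0/data pairs (14 dwords, matching .emit_frame_size with no
 * user fence) and emit_ib writes two pairs (4 dwords, .emit_ib_size).
 * rptr/wptr live in 32-bit registers on this block, hence
 * .support_64bit_ptrs = false.
 */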
static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v4_2_ring_get_rptr,
	.get_wptr = uvd_v4_2_ring_get_wptr,
	.set_wptr = uvd_v4_2_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		14, /* uvd_v4_2_ring_emit_fence  x1 no user fence */
	.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
	.emit_ib = uvd_v4_2_ring_emit_ib,
	.emit_fence = uvd_v4_2_ring_emit_fence,
	.test_ring = uvd_v4_2_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v4_2_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
	.set = uvd_v4_2_set_interrupt_state,
	.process = uvd_v4_2_process_interrupt,
};

static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->irq.num_types = 1;
	adev->uvd.inst->irq.funcs = &uvd_v4_2_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v4_2_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 4,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v4_2_ip_funcs,
};