xref: /linux/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c (revision 7a5f1cd22d47f8ca4b760b6334378ae42c1bd24b)
1 /*
2  * Copyright 2013 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Christian König <christian.koenig@amd.com>
23  */
24 
25 #include <linux/firmware.h>
26 
27 #include "amdgpu.h"
28 #include "amdgpu_uvd.h"
29 #include "cikd.h"
30 
31 #include "uvd/uvd_4_2_d.h"
32 #include "uvd/uvd_4_2_sh_mask.h"
33 
34 #include "oss/oss_2_0_d.h"
35 #include "oss/oss_2_0_sh_mask.h"
36 
37 #include "bif/bif_4_1_d.h"
38 
39 #include "smu/smu_7_0_1_d.h"
40 #include "smu/smu_7_0_1_sh_mask.h"
41 
42 static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
43 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
44 static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
45 static int uvd_v4_2_start(struct amdgpu_device *adev);
46 static void uvd_v4_2_stop(struct amdgpu_device *adev);
47 static int uvd_v4_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
48 				enum amd_clockgating_state state);
49 static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
50 			     bool sw_mode);
51 /**
52  * uvd_v4_2_ring_get_rptr - get read pointer
53  *
54  * @ring: amdgpu_ring pointer
55  *
56  * Returns the current hardware read pointer
57  */
58 static uint64_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
59 {
60 	struct amdgpu_device *adev = ring->adev;
61 
62 	return RREG32(mmUVD_RBC_RB_RPTR);
63 }
64 
65 /**
66  * uvd_v4_2_ring_get_wptr - get write pointer
67  *
68  * @ring: amdgpu_ring pointer
69  *
70  * Returns the current hardware write pointer
71  */
72 static uint64_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
73 {
74 	struct amdgpu_device *adev = ring->adev;
75 
76 	return RREG32(mmUVD_RBC_RB_WPTR);
77 }
78 
79 /**
80  * uvd_v4_2_ring_set_wptr - set write pointer
81  *
82  * @ring: amdgpu_ring pointer
83  *
84  * Commits the write pointer to the hardware
85  */
86 static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
87 {
88 	struct amdgpu_device *adev = ring->adev;
89 
90 	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
91 }
92 
93 static int uvd_v4_2_early_init(struct amdgpu_ip_block *ip_block)
94 {
95 	struct amdgpu_device *adev = ip_block->adev;
96 
97 	/* UVD doesn't work without DPM, it needs DPM to ungate it. */
98 	if (!amdgpu_dpm)
99 		return -ENOENT;
100 
101 	adev->uvd.num_uvd_inst = 1;
102 
103 	uvd_v4_2_set_ring_funcs(adev);
104 	uvd_v4_2_set_irq_funcs(adev);
105 
106 	return 0;
107 }
108 
109 static int uvd_v4_2_sw_init(struct amdgpu_ip_block *ip_block)
110 {
111 	struct amdgpu_ring *ring;
112 	struct amdgpu_device *adev = ip_block->adev;
113 	int r;
114 
115 	/* UVD TRAP */
116 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
117 	if (r)
118 		return r;
119 
120 	r = amdgpu_uvd_sw_init(adev);
121 	if (r)
122 		return r;
123 
124 	ring = &adev->uvd.inst->ring;
125 	sprintf(ring->name, "uvd");
126 	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
127 			     AMDGPU_RING_PRIO_DEFAULT, NULL);
128 	if (r)
129 		return r;
130 
131 	r = amdgpu_uvd_resume(adev);
132 	if (r)
133 		return r;
134 
135 	return r;
136 }
137 
138 static int uvd_v4_2_sw_fini(struct amdgpu_ip_block *ip_block)
139 {
140 	int r;
141 	struct amdgpu_device *adev = ip_block->adev;
142 
143 	r = amdgpu_uvd_suspend(adev);
144 	if (r)
145 		return r;
146 
147 	return amdgpu_uvd_sw_fini(adev);
148 }
149 
150 static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
151 				 bool enable);
152 /**
153  * uvd_v4_2_hw_init - start and test UVD block
154  *
155  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
156  *
157  * Initialize the hardware, boot up the VCPU and do some testing
158  */
159 static int uvd_v4_2_hw_init(struct amdgpu_ip_block *ip_block)
160 {
161 	struct amdgpu_device *adev = ip_block->adev;
162 	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
163 	uint32_t tmp;
164 	int r;
165 
166 	uvd_v4_2_enable_mgcg(adev, true);
167 	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
168 
169 	r = amdgpu_ring_test_helper(ring);
170 	if (r)
171 		goto done;
172 
173 	r = amdgpu_ring_alloc(ring, 10);
174 	if (r) {
175 		drm_err(adev_to_drm(adev), "ring alloc failed (%d).\n", r);
176 		goto done;
177 	}
178 
179 	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
180 	amdgpu_ring_write(ring, tmp);
181 	amdgpu_ring_write(ring, 0xFFFFF);
182 
183 	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
184 	amdgpu_ring_write(ring, tmp);
185 	amdgpu_ring_write(ring, 0xFFFFF);
186 
187 	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
188 	amdgpu_ring_write(ring, tmp);
189 	amdgpu_ring_write(ring, 0xFFFFF);
190 
191 	/* Clear timeout status bits */
192 	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
193 	amdgpu_ring_write(ring, 0x8);
194 
195 	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
196 	amdgpu_ring_write(ring, 3);
197 
198 	amdgpu_ring_commit(ring);
199 
200 done:
201 	if (!r)
202 		drm_info(adev_to_drm(adev), "UVD initialized successfully.\n");
203 
204 	return r;
205 }
206 
207 /**
208  * uvd_v4_2_hw_fini - stop the hardware block
209  *
210  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
211  *
212  * Stop the UVD block, mark ring as not ready any more
213  */
214 static int uvd_v4_2_hw_fini(struct amdgpu_ip_block *ip_block)
215 {
216 	struct amdgpu_device *adev = ip_block->adev;
217 
218 	cancel_delayed_work_sync(&adev->uvd.idle_work);
219 
220 	if (RREG32(mmUVD_STATUS) != 0)
221 		uvd_v4_2_stop(adev);
222 
223 	return 0;
224 }
225 
226 static int uvd_v4_2_prepare_suspend(struct amdgpu_ip_block *ip_block)
227 {
228 	struct amdgpu_device *adev = ip_block->adev;
229 
230 	return amdgpu_uvd_prepare_suspend(adev);
231 }
232 
233 static int uvd_v4_2_suspend(struct amdgpu_ip_block *ip_block)
234 {
235 	int r;
236 	struct amdgpu_device *adev = ip_block->adev;
237 
238 	/*
239 	 * Proper cleanups before halting the HW engine:
240 	 *   - cancel the delayed idle work
241 	 *   - enable powergating
242 	 *   - enable clockgating
243 	 *   - disable dpm
244 	 *
245 	 * TODO: to align with the VCN implementation, move the
246 	 * jobs for clockgating/powergating/dpm setting to
247 	 * ->set_powergating_state().
248 	 */
249 	cancel_delayed_work_sync(&adev->uvd.idle_work);
250 
251 	if (adev->pm.dpm_enabled) {
252 		amdgpu_dpm_enable_uvd(adev, false);
253 	} else {
254 		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
255 		/* shutdown the UVD block */
256 		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
257 						       AMD_PG_STATE_GATE);
258 		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
259 						       AMD_CG_STATE_GATE);
260 	}
261 
262 	r = uvd_v4_2_hw_fini(ip_block);
263 	if (r)
264 		return r;
265 
266 	return amdgpu_uvd_suspend(adev);
267 }
268 
269 static int uvd_v4_2_resume(struct amdgpu_ip_block *ip_block)
270 {
271 	int r;
272 
273 	r = amdgpu_uvd_resume(ip_block->adev);
274 	if (r)
275 		return r;
276 
277 	return uvd_v4_2_hw_init(ip_block);
278 }
279 
280 /**
281  * uvd_v4_2_start - start UVD block
282  *
283  * @adev: amdgpu_device pointer
284  *
285  * Setup and start the UVD block
286  */
287 static int uvd_v4_2_start(struct amdgpu_device *adev)
288 {
289 	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
290 	uint32_t rb_bufsz;
291 	int i, j, r;
292 	u32 tmp;
293 	/* disable byte swapping */
294 	u32 lmi_swap_cntl = 0;
295 	u32 mp_swap_cntl = 0;
296 
297 	/* set uvd busy */
298 	WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2));
299 
300 	uvd_v4_2_set_dcm(adev, true);
301 	WREG32(mmUVD_CGC_GATE, 0);
302 
303 	/* take UVD block out of reset */
304 	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
305 	mdelay(5);
306 
307 	/* enable VCPU clock */
308 	WREG32(mmUVD_VCPU_CNTL,  1 << 9);
309 
310 	/* disable interrupt */
311 	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
312 
313 #ifdef __BIG_ENDIAN
314 	/* swap (8 in 32) RB and IB */
315 	lmi_swap_cntl = 0xa;
316 	mp_swap_cntl = 0;
317 #endif
318 	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
319 	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
320 
321 	/* initialize UVD memory controller */
322 	WREG32(mmUVD_LMI_CTRL, 0x203108);
323 
324 	tmp = RREG32(mmUVD_MPC_CNTL);
325 	WREG32(mmUVD_MPC_CNTL, tmp | 0x10);
326 
327 	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
328 	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
329 	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
330 	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
331 	WREG32(mmUVD_MPC_SET_ALU, 0);
332 	WREG32(mmUVD_MPC_SET_MUX, 0x88);
333 
334 	uvd_v4_2_mc_resume(adev);
335 
336 	tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
337 	WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));
338 
339 	/* enable UMC */
340 	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
341 
342 	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
343 
344 	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
345 
346 	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
347 
348 	mdelay(10);
349 
350 	for (i = 0; i < 10; ++i) {
351 		uint32_t status;
352 		for (j = 0; j < 100; ++j) {
353 			status = RREG32(mmUVD_STATUS);
354 			if (status & 2)
355 				break;
356 			mdelay(10);
357 		}
358 		r = 0;
359 		if (status & 2)
360 			break;
361 
362 		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
363 		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
364 				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
365 		mdelay(10);
366 		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
367 		mdelay(10);
368 		r = -1;
369 	}
370 
371 	if (r) {
372 		DRM_ERROR("UVD not responding, giving up!!!\n");
373 		return r;
374 	}
375 
376 	/* enable interupt */
377 	WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));
378 
379 	WREG32_P(mmUVD_STATUS, 0, ~(1<<2));
380 
381 	/* force RBC into idle state */
382 	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
383 
384 	/* Set the write pointer delay */
385 	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);
386 
387 	/* program the 4GB memory segment for rptr and ring buffer */
388 	WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
389 				   (0x7 << 16) | (0x1 << 31));
390 
391 	/* Initialize the ring buffer's read and write pointers */
392 	WREG32(mmUVD_RBC_RB_RPTR, 0x0);
393 
394 	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
395 	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
396 
397 	/* set the ring address */
398 	WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);
399 
400 	/* Set ring buffer size */
401 	rb_bufsz = order_base_2(ring->ring_size);
402 	rb_bufsz = (0x1 << 8) | rb_bufsz;
403 	WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);
404 
405 	return 0;
406 }
407 
408 /**
409  * uvd_v4_2_stop - stop UVD block
410  *
411  * @adev: amdgpu_device pointer
412  *
413  * stop the UVD block
414  */
415 static void uvd_v4_2_stop(struct amdgpu_device *adev)
416 {
417 	uint32_t i, j;
418 	uint32_t status;
419 
420 	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
421 
422 	for (i = 0; i < 10; ++i) {
423 		for (j = 0; j < 100; ++j) {
424 			status = RREG32(mmUVD_STATUS);
425 			if (status & 2)
426 				break;
427 			mdelay(1);
428 		}
429 		if (status & 2)
430 			break;
431 	}
432 
433 	for (i = 0; i < 10; ++i) {
434 		for (j = 0; j < 100; ++j) {
435 			status = RREG32(mmUVD_LMI_STATUS);
436 			if (status & 0xf)
437 				break;
438 			mdelay(1);
439 		}
440 		if (status & 0xf)
441 			break;
442 	}
443 
444 	/* Stall UMC and register bus before resetting VCPU */
445 	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
446 
447 	for (i = 0; i < 10; ++i) {
448 		for (j = 0; j < 100; ++j) {
449 			status = RREG32(mmUVD_LMI_STATUS);
450 			if (status & 0x240)
451 				break;
452 			mdelay(1);
453 		}
454 		if (status & 0x240)
455 			break;
456 	}
457 
458 	WREG32_P(0x3D49, 0, ~(1 << 2));
459 
460 	WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));
461 
462 	/* put LMI, VCPU, RBC etc... into reset */
463 	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
464 		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
465 		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
466 
467 	WREG32(mmUVD_STATUS, 0);
468 
469 	uvd_v4_2_set_dcm(adev, false);
470 }
471 
472 /**
473  * uvd_v4_2_ring_emit_fence - emit an fence & trap command
474  *
475  * @ring: amdgpu_ring pointer
476  * @addr: address
477  * @seq: sequence number
478  * @flags: fence related flags
479  *
480  * Write a fence and a trap command to the ring.
481  */
482 static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
483 				     unsigned flags)
484 {
485 	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
486 
487 	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
488 	amdgpu_ring_write(ring, seq);
489 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
490 	amdgpu_ring_write(ring, addr & 0xffffffff);
491 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
492 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
493 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
494 	amdgpu_ring_write(ring, 0);
495 
496 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
497 	amdgpu_ring_write(ring, 0);
498 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
499 	amdgpu_ring_write(ring, 0);
500 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
501 	amdgpu_ring_write(ring, 2);
502 }
503 
504 /**
505  * uvd_v4_2_ring_test_ring - register write test
506  *
507  * @ring: amdgpu_ring pointer
508  *
509  * Test if we can successfully write to the context register
510  */
511 static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
512 {
513 	struct amdgpu_device *adev = ring->adev;
514 	uint32_t tmp = 0;
515 	unsigned i;
516 	int r;
517 
518 	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
519 	r = amdgpu_ring_alloc(ring, 3);
520 	if (r)
521 		return r;
522 
523 	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
524 	amdgpu_ring_write(ring, 0xDEADBEEF);
525 	amdgpu_ring_commit(ring);
526 	for (i = 0; i < adev->usec_timeout; i++) {
527 		tmp = RREG32(mmUVD_CONTEXT_ID);
528 		if (tmp == 0xDEADBEEF)
529 			break;
530 		udelay(1);
531 	}
532 
533 	if (i >= adev->usec_timeout)
534 		r = -ETIMEDOUT;
535 
536 	return r;
537 }
538 
539 /**
540  * uvd_v4_2_ring_emit_ib - execute indirect buffer
541  *
542  * @ring: amdgpu_ring pointer
543  * @job: iob associated with the indirect buffer
544  * @ib: indirect buffer to execute
545  * @flags: flags associated with the indirect buffer
546  *
547  * Write ring commands to execute the indirect buffer
548  */
549 static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
550 				  struct amdgpu_job *job,
551 				  struct amdgpu_ib *ib,
552 				  uint32_t flags)
553 {
554 	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
555 	amdgpu_ring_write(ring, ib->gpu_addr);
556 	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
557 	amdgpu_ring_write(ring, ib->length_dw);
558 }
559 
560 static void uvd_v4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
561 {
562 	int i;
563 
564 	WARN_ON(ring->wptr % 2 || count % 2);
565 
566 	for (i = 0; i < count / 2; i++) {
567 		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
568 		amdgpu_ring_write(ring, 0);
569 	}
570 }
571 
572 /**
573  * uvd_v4_2_mc_resume - memory controller programming
574  *
575  * @adev: amdgpu_device pointer
576  *
577  * Let the UVD memory controller know it's offsets
578  */
579 static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
580 {
581 	uint64_t addr;
582 	uint32_t size;
583 
584 	/* program the VCPU memory controller bits 0-27 */
585 	addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
586 	size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
587 	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
588 	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
589 
590 	addr += size;
591 	size = AMDGPU_UVD_HEAP_SIZE >> 3;
592 	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
593 	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
594 
595 	addr += size;
596 	size = (AMDGPU_UVD_STACK_SIZE +
597 	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
598 	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
599 	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
600 
601 	/* bits 28-31 */
602 	addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
603 	WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
604 
605 	/* bits 32-39 */
606 	addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
607 	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
608 
609 	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
610 	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
611 	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
612 }
613 
614 static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
615 				 bool enable)
616 {
617 	u32 orig, data;
618 
619 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
620 		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
621 		data |= 0xfff;
622 		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
623 
624 		orig = data = RREG32(mmUVD_CGC_CTRL);
625 		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
626 		if (orig != data)
627 			WREG32(mmUVD_CGC_CTRL, data);
628 	} else {
629 		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
630 		data &= ~0xfff;
631 		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
632 
633 		orig = data = RREG32(mmUVD_CGC_CTRL);
634 		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
635 		if (orig != data)
636 			WREG32(mmUVD_CGC_CTRL, data);
637 	}
638 }
639 
640 static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
641 			     bool sw_mode)
642 {
643 	u32 tmp, tmp2;
644 
645 	WREG32_FIELD(UVD_CGC_GATE, REGS, 0);
646 
647 	tmp = RREG32(mmUVD_CGC_CTRL);
648 	tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
649 	tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
650 		(1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
651 		(4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);
652 
653 	if (sw_mode) {
654 		tmp &= ~0x7ffff800;
655 		tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
656 			UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
657 			(7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
658 	} else {
659 		tmp |= 0x7ffff800;
660 		tmp2 = 0;
661 	}
662 
663 	WREG32(mmUVD_CGC_CTRL, tmp);
664 	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
665 }
666 
667 static bool uvd_v4_2_is_idle(struct amdgpu_ip_block *ip_block)
668 {
669 	struct amdgpu_device *adev = ip_block->adev;
670 
671 	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
672 }
673 
674 static int uvd_v4_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
675 {
676 	unsigned i;
677 	struct amdgpu_device *adev = ip_block->adev;
678 
679 	for (i = 0; i < adev->usec_timeout; i++) {
680 		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
681 			return 0;
682 	}
683 	return -ETIMEDOUT;
684 }
685 
686 static int uvd_v4_2_soft_reset(struct amdgpu_ip_block *ip_block)
687 {
688 	struct amdgpu_device *adev = ip_block->adev;
689 
690 	uvd_v4_2_stop(adev);
691 
692 	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
693 			~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
694 	mdelay(5);
695 
696 	return uvd_v4_2_start(adev);
697 }
698 
699 static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
700 					struct amdgpu_irq_src *source,
701 					unsigned type,
702 					enum amdgpu_interrupt_state state)
703 {
704 	// TODO
705 	return 0;
706 }
707 
708 static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
709 				      struct amdgpu_irq_src *source,
710 				      struct amdgpu_iv_entry *entry)
711 {
712 	DRM_DEBUG("IH: UVD TRAP\n");
713 	amdgpu_fence_process(&adev->uvd.inst->ring);
714 	return 0;
715 }
716 
717 static int uvd_v4_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
718 					  enum amd_clockgating_state state)
719 {
720 	return 0;
721 }
722 
723 static int uvd_v4_2_set_powergating_state(struct amdgpu_ip_block *ip_block,
724 					  enum amd_powergating_state state)
725 {
726 	/* This doesn't actually powergate the UVD block.
727 	 * That's done in the dpm code via the SMC.  This
728 	 * just re-inits the block as necessary.  The actual
729 	 * gating still happens in the dpm code.  We should
730 	 * revisit this when there is a cleaner line between
731 	 * the smc and the hw blocks
732 	 */
733 	struct amdgpu_device *adev = ip_block->adev;
734 
735 	if (state == AMD_PG_STATE_GATE) {
736 		uvd_v4_2_stop(adev);
737 		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
738 			if (!(RREG32_SMC(ixCURRENT_PG_STATUS) &
739 				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) {
740 				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK   |
741 							UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK |
742 							UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
743 				mdelay(20);
744 			}
745 		}
746 		return 0;
747 	} else {
748 		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
749 			if (RREG32_SMC(ixCURRENT_PG_STATUS) &
750 				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
751 				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK   |
752 						UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK |
753 						UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
754 				mdelay(30);
755 			}
756 		}
757 		return uvd_v4_2_start(adev);
758 	}
759 }
760 
761 static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
762 	.name = "uvd_v4_2",
763 	.early_init = uvd_v4_2_early_init,
764 	.sw_init = uvd_v4_2_sw_init,
765 	.sw_fini = uvd_v4_2_sw_fini,
766 	.hw_init = uvd_v4_2_hw_init,
767 	.hw_fini = uvd_v4_2_hw_fini,
768 	.prepare_suspend = uvd_v4_2_prepare_suspend,
769 	.suspend = uvd_v4_2_suspend,
770 	.resume = uvd_v4_2_resume,
771 	.is_idle = uvd_v4_2_is_idle,
772 	.wait_for_idle = uvd_v4_2_wait_for_idle,
773 	.soft_reset = uvd_v4_2_soft_reset,
774 	.set_clockgating_state = uvd_v4_2_set_clockgating_state,
775 	.set_powergating_state = uvd_v4_2_set_powergating_state,
776 };
777 
778 static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
779 	.type = AMDGPU_RING_TYPE_UVD,
780 	.align_mask = 0xf,
781 	.support_64bit_ptrs = false,
782 	.no_user_fence = true,
783 	.get_rptr = uvd_v4_2_ring_get_rptr,
784 	.get_wptr = uvd_v4_2_ring_get_wptr,
785 	.set_wptr = uvd_v4_2_ring_set_wptr,
786 	.parse_cs = amdgpu_uvd_ring_parse_cs,
787 	.emit_frame_size =
788 		14, /* uvd_v4_2_ring_emit_fence  x1 no user fence */
789 	.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
790 	.emit_ib = uvd_v4_2_ring_emit_ib,
791 	.emit_fence = uvd_v4_2_ring_emit_fence,
792 	.test_ring = uvd_v4_2_ring_test_ring,
793 	.test_ib = amdgpu_uvd_ring_test_ib,
794 	.insert_nop = uvd_v4_2_ring_insert_nop,
795 	.pad_ib = amdgpu_ring_generic_pad_ib,
796 	.begin_use = amdgpu_uvd_ring_begin_use,
797 	.end_use = amdgpu_uvd_ring_end_use,
798 };
799 
800 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
801 {
802 	adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs;
803 }
804 
805 static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
806 	.set = uvd_v4_2_set_interrupt_state,
807 	.process = uvd_v4_2_process_interrupt,
808 };
809 
810 static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
811 {
812 	adev->uvd.inst->irq.num_types = 1;
813 	adev->uvd.inst->irq.funcs = &uvd_v4_2_irq_funcs;
814 }
815 
816 const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
817 {
818 		.type = AMD_IP_BLOCK_TYPE_UVD,
819 		.major = 4,
820 		.minor = 2,
821 		.rev = 0,
822 		.funcs = &uvd_v4_2_ip_funcs,
823 };
824