xref: /linux/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c (revision 99a97a8ba9881fc47901ff36b057e5cd0bf06af0)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 #include <drm/drmP.h>
26 #include "amdgpu.h"
27 #include "amdgpu_uvd.h"
28 #include "soc15d.h"
29 #include "soc15_common.h"
30 
31 #include "vega10/soc15ip.h"
32 #include "vega10/UVD/uvd_7_0_offset.h"
33 #include "vega10/UVD/uvd_7_0_sh_mask.h"
34 #include "vega10/NBIF/nbif_6_1_offset.h"
35 #include "vega10/HDP/hdp_4_0_offset.h"
36 #include "vega10/MMHUB/mmhub_1_0_offset.h"
37 #include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
38 
39 static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
40 static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
41 static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
42 static int uvd_v7_0_start(struct amdgpu_device *adev);
43 static void uvd_v7_0_stop(struct amdgpu_device *adev);
44 
45 /**
46  * uvd_v7_0_ring_get_rptr - get read pointer
47  *
48  * @ring: amdgpu_ring pointer
49  *
50  * Returns the current hardware read pointer
51  */
52 static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
53 {
54 	struct amdgpu_device *adev = ring->adev;
55 
56 	return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR));
57 }
58 
59 /**
60  * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
61  *
62  * @ring: amdgpu_ring pointer
63  *
64  * Returns the current hardware enc read pointer
65  */
66 static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
67 {
68 	struct amdgpu_device *adev = ring->adev;
69 
70 	if (ring == &adev->uvd.ring_enc[0])
71 		return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_RPTR));
72 	else
73 		return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_RPTR2));
74 }
75 
76 /**
77  * uvd_v7_0_ring_get_wptr - get write pointer
78  *
79  * @ring: amdgpu_ring pointer
80  *
81  * Returns the current hardware write pointer
82  */
83 static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
84 {
85 	struct amdgpu_device *adev = ring->adev;
86 
87 	return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR));
88 }
89 
90 /**
91  * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
92  *
93  * @ring: amdgpu_ring pointer
94  *
95  * Returns the current hardware enc write pointer
96  */
97 static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
98 {
99 	struct amdgpu_device *adev = ring->adev;
100 
101 	if (ring == &adev->uvd.ring_enc[0])
102 		return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR));
103 	else
104 		return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR2));
105 }
106 
107 /**
108  * uvd_v7_0_ring_set_wptr - set write pointer
109  *
110  * @ring: amdgpu_ring pointer
111  *
112  * Commits the write pointer to the hardware
113  */
114 static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
115 {
116 	struct amdgpu_device *adev = ring->adev;
117 
118 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR), lower_32_bits(ring->wptr));
119 }
120 
121 /**
122  * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
123  *
124  * @ring: amdgpu_ring pointer
125  *
126  * Commits the enc write pointer to the hardware
127  */
128 static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
129 {
130 	struct amdgpu_device *adev = ring->adev;
131 
132 	if (ring == &adev->uvd.ring_enc[0])
133 		WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR),
134 			lower_32_bits(ring->wptr));
135 	else
136 		WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR2),
137 			lower_32_bits(ring->wptr));
138 }
139 
140 /**
141  * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
142  *
143  * @ring: the engine to test on
144  *
145  * Emit an END command and check that the read pointer advances.
146 static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
147 {
148 	struct amdgpu_device *adev = ring->adev;
149 	uint32_t rptr = amdgpu_ring_get_rptr(ring);
150 	unsigned i;
151 	int r;
152 
153 	r = amdgpu_ring_alloc(ring, 16);
154 	if (r) {
155 		DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
156 			  ring->idx, r);
157 		return r;
158 	}
159 	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
160 	amdgpu_ring_commit(ring);
161 
162 	for (i = 0; i < adev->usec_timeout; i++) {
163 		if (amdgpu_ring_get_rptr(ring) != rptr)
164 			break;
165 		DRM_UDELAY(1);
166 	}
167 
168 	if (i < adev->usec_timeout) {
169 		DRM_INFO("ring test on %d succeeded in %d usecs\n",
170 			 ring->idx, i);
171 	} else {
172 		DRM_ERROR("amdgpu: ring %d test failed\n",
173 			  ring->idx);
174 		r = -ETIMEDOUT;
175 	}
176 
177 	return r;
178 }
179 
180 /**
181  * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
182  *
184  * @ring: ring we should submit the msg to
185  * @handle: session handle to use
186  * @fence: optional fence to return
187  *
188  * Open up a stream for HW test
189  */
190 static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
191 				       struct dma_fence **fence)
192 {
193 	const unsigned ib_size_dw = 16;
194 	struct amdgpu_job *job;
195 	struct amdgpu_ib *ib;
196 	struct dma_fence *f = NULL;
197 	uint64_t dummy;
198 	int i, r;
199 
200 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
201 	if (r)
202 		return r;
203 
204 	ib = &job->ibs[0];
205 	dummy = ib->gpu_addr + 1024;
206 
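	/*
	 * Build a minimal ENC create message: a session-info block that
	 * references the handle and a dummy buffer 1KB past the IB, a
	 * task-info block, and an op-initialize command; the rest of the
	 * IB is padded with zeroes below.
	 */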
207 	ib->length_dw = 0;
208 	ib->ptr[ib->length_dw++] = 0x00000018;
209 	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
210 	ib->ptr[ib->length_dw++] = handle;
211 	ib->ptr[ib->length_dw++] = 0x00000000;
212 	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
213 	ib->ptr[ib->length_dw++] = dummy;
214 
215 	ib->ptr[ib->length_dw++] = 0x00000014;
216 	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
217 	ib->ptr[ib->length_dw++] = 0x0000001c;
218 	ib->ptr[ib->length_dw++] = 0x00000000;
219 	ib->ptr[ib->length_dw++] = 0x00000000;
220 
221 	ib->ptr[ib->length_dw++] = 0x00000008;
222 	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
223 
224 	for (i = ib->length_dw; i < ib_size_dw; ++i)
225 		ib->ptr[i] = 0x0;
226 
227 	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
228 	job->fence = dma_fence_get(f);
229 	if (r)
230 		goto err;
231 
232 	amdgpu_job_free(job);
233 	if (fence)
234 		*fence = dma_fence_get(f);
235 	dma_fence_put(f);
236 	return 0;
237 
238 err:
239 	amdgpu_job_free(job);
240 	return r;
241 }
242 
243 /**
244  * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
245  *
246  * @direct: submit the message directly to the ring instead of using the scheduler
247  * @ring: ring we should submit the msg to
248  * @handle: session handle to use
249  * @fence: optional fence to return
250  *
251  * Close up a stream for HW test or if userspace failed to do so
252  */
253 int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
254 				 bool direct, struct dma_fence **fence)
255 {
256 	const unsigned ib_size_dw = 16;
257 	struct amdgpu_job *job;
258 	struct amdgpu_ib *ib;
259 	struct dma_fence *f = NULL;
260 	uint64_t dummy;
261 	int i, r;
262 
263 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
264 	if (r)
265 		return r;
266 
267 	ib = &job->ibs[0];
268 	dummy = ib->gpu_addr + 1024;
269 
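	/*
	 * Same layout as the create message above, but ending with an
	 * op close-session command (0x08000002) instead of op initialize.
	 */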
270 	ib->length_dw = 0;
271 	ib->ptr[ib->length_dw++] = 0x00000018;
272 	ib->ptr[ib->length_dw++] = 0x00000001;
273 	ib->ptr[ib->length_dw++] = handle;
274 	ib->ptr[ib->length_dw++] = 0x00000000;
275 	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
276 	ib->ptr[ib->length_dw++] = dummy;
277 
278 	ib->ptr[ib->length_dw++] = 0x00000014;
279 	ib->ptr[ib->length_dw++] = 0x00000002;
280 	ib->ptr[ib->length_dw++] = 0x0000001c;
281 	ib->ptr[ib->length_dw++] = 0x00000000;
282 	ib->ptr[ib->length_dw++] = 0x00000000;
283 
284 	ib->ptr[ib->length_dw++] = 0x00000008;
285 	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
286 
287 	for (i = ib->length_dw; i < ib_size_dw; ++i)
288 		ib->ptr[i] = 0x0;
289 
290 	if (direct) {
291 		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
292 		job->fence = dma_fence_get(f);
293 		if (r)
294 			goto err;
295 
296 		amdgpu_job_free(job);
297 	} else {
298 		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
299 				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
300 		if (r)
301 			goto err;
302 	}
303 
304 	if (fence)
305 		*fence = dma_fence_get(f);
306 	dma_fence_put(f);
307 	return 0;
308 
309 err:
310 	amdgpu_job_free(job);
311 	return r;
312 }
313 
314 /**
315  * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
316  *
317  * @ring: the engine to test on
318  * @timeout: how long to wait for the IB test fence, in jiffies
319  */
320 static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
321 {
322 	struct dma_fence *fence = NULL;
323 	long r;
324 
325 	r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
326 	if (r) {
327 		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
328 		goto error;
329 	}
330 
331 	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, true, &fence);
332 	if (r) {
333 		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
334 		goto error;
335 	}
336 
337 	r = dma_fence_wait_timeout(fence, false, timeout);
338 	if (r == 0) {
339 		DRM_ERROR("amdgpu: IB test timed out.\n");
340 		r = -ETIMEDOUT;
341 	} else if (r < 0) {
342 		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
343 	} else {
344 		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
345 		r = 0;
346 	}
347 error:
348 	dma_fence_put(fence);
349 	return r;
350 }
351 
352 static int uvd_v7_0_early_init(void *handle)
353 {
354 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
355 
356 	adev->uvd.num_enc_rings = 2;
357 	uvd_v7_0_set_ring_funcs(adev);
358 	uvd_v7_0_set_enc_ring_funcs(adev);
359 	uvd_v7_0_set_irq_funcs(adev);
360 
361 	return 0;
362 }
363 
364 static int uvd_v7_0_sw_init(void *handle)
365 {
366 	struct amdgpu_ring *ring;
367 	struct amd_sched_rq *rq;
368 	int i, r;
369 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
370 
371 	/* UVD TRAP */
372 	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UVD, 124, &adev->uvd.irq);
373 	if (r)
374 		return r;
375 
376 	/* UVD ENC TRAP */
377 	for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
378 		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UVD, i + 119, &adev->uvd.irq);
379 		if (r)
380 			return r;
381 	}
382 
383 	r = amdgpu_uvd_sw_init(adev);
384 	if (r)
385 		return r;
386 
387 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
388 		const struct common_firmware_header *hdr;
389 		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
390 		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
391 		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
392 		adev->firmware.fw_size +=
393 			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
394 		DRM_INFO("PSP loading UVD firmware\n");
395 	}
396 
397 	ring = &adev->uvd.ring_enc[0];
398 	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
399 	r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
400 				  rq, amdgpu_sched_jobs);
401 	if (r) {
402 		DRM_ERROR("Failed setting up UVD ENC run queue.\n");
403 		return r;
404 	}
405 
406 	r = amdgpu_uvd_resume(adev);
407 	if (r)
408 		return r;
409 
410 	ring = &adev->uvd.ring;
411 	sprintf(ring->name, "uvd");
412 	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
413 	if (r)
414 		return r;
415 
416 	for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
417 		ring = &adev->uvd.ring_enc[i];
418 		sprintf(ring->name, "uvd_enc%d", i);
419 		r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
420 		if (r)
421 			return r;
422 	}
423 
424 	return r;
425 }
426 
427 static int uvd_v7_0_sw_fini(void *handle)
428 {
429 	int i, r;
430 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
431 
432 	r = amdgpu_uvd_suspend(adev);
433 	if (r)
434 		return r;
435 
436 	amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
437 
438 	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
439 		amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
440 
441 	r = amdgpu_uvd_sw_fini(adev);
442 	if (r)
443 		return r;
444 
445 	return r;
446 }
447 
448 /**
449  * uvd_v7_0_hw_init - start and test UVD block
450  *
451  * @handle: amdgpu_device pointer
452  *
453  * Initialize the hardware, boot up the VCPU and do some testing
454  */
455 static int uvd_v7_0_hw_init(void *handle)
456 {
457 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
458 	struct amdgpu_ring *ring = &adev->uvd.ring;
459 	uint32_t tmp;
460 	int i, r;
461 
462 	r = uvd_v7_0_start(adev);
463 	if (r)
464 		goto done;
465 
466 	ring->ready = true;
467 	r = amdgpu_ring_test_ring(ring);
468 	if (r) {
469 		ring->ready = false;
470 		goto done;
471 	}
472 
473 	r = amdgpu_ring_alloc(ring, 10);
474 	if (r) {
475 		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
476 		goto done;
477 	}
478 
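	/*
	 * Program the semaphore wait/signal timeout registers, clear the
	 * timeout status bits and set up UVD_SEMA_CNTL via PACKET0 writes
	 * submitted through the ring we just tested.
	 */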
479 	tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
480 		mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
481 	amdgpu_ring_write(ring, tmp);
482 	amdgpu_ring_write(ring, 0xFFFFF);
483 
484 	tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
485 		mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
486 	amdgpu_ring_write(ring, tmp);
487 	amdgpu_ring_write(ring, 0xFFFFF);
488 
489 	tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
490 		mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
491 	amdgpu_ring_write(ring, tmp);
492 	amdgpu_ring_write(ring, 0xFFFFF);
493 
494 	/* Clear timeout status bits */
495 	amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
496 		mmUVD_SEMA_TIMEOUT_STATUS), 0));
497 	amdgpu_ring_write(ring, 0x8);
498 
499 	amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
500 		mmUVD_SEMA_CNTL), 0));
501 	amdgpu_ring_write(ring, 3);
502 
503 	amdgpu_ring_commit(ring);
504 
505 	for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
506 		ring = &adev->uvd.ring_enc[i];
507 		ring->ready = true;
508 		r = amdgpu_ring_test_ring(ring);
509 		if (r) {
510 			ring->ready = false;
511 			goto done;
512 		}
513 	}
514 
515 done:
516 	if (!r)
517 		DRM_INFO("UVD and UVD ENC initialized successfully.\n");
518 
519 	return r;
520 }
521 
522 /**
523  * uvd_v7_0_hw_fini - stop the hardware block
524  *
525  * @handle: amdgpu_device pointer
526  *
527  * Stop the UVD block, mark ring as not ready any more
528  */
529 static int uvd_v7_0_hw_fini(void *handle)
530 {
531 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
532 	struct amdgpu_ring *ring = &adev->uvd.ring;
533 
534 	uvd_v7_0_stop(adev);
535 	ring->ready = false;
536 
537 	return 0;
538 }
539 
540 static int uvd_v7_0_suspend(void *handle)
541 {
542 	int r;
543 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
544 
545 	r = uvd_v7_0_hw_fini(adev);
546 	if (r)
547 		return r;
548 
549 	/* Skip this for APU for now */
550 	if (!(adev->flags & AMD_IS_APU)) {
551 		r = amdgpu_uvd_suspend(adev);
552 		if (r)
553 			return r;
554 	}
555 
556 	return r;
557 }
558 
559 static int uvd_v7_0_resume(void *handle)
560 {
561 	int r;
562 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
563 
564 	/* Skip this for APU for now */
565 	if (!(adev->flags & AMD_IS_APU)) {
566 		r = amdgpu_uvd_resume(adev);
567 		if (r)
568 			return r;
569 	}
570 	r = uvd_v7_0_hw_init(adev);
571 	if (r)
572 		return r;
573 
574 	return r;
575 }
576 
577 /**
578  * uvd_v7_0_mc_resume - memory controller programming
579  *
580  * @adev: amdgpu_device pointer
581  *
582  * Let the UVD memory controller know its offsets
583  */
584 static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
585 {
586 	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
587 	uint32_t offset;
588 
589 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
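	/*
	 * VCPU cache 0 maps the firmware image (the PSP-loaded copy or the
	 * UVD BO), cache 1 maps the heap and cache 2 the stack/session
	 * space that follow it.
	 */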
590 		WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
591 			lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
592 		WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
593 			upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
594 		offset = 0;
595 	} else {
596 		WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
597 			lower_32_bits(adev->uvd.gpu_addr));
598 		WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
599 			upper_32_bits(adev->uvd.gpu_addr));
600 		offset = size;
601 	}
602 
603 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
604 				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
605 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size);
606 
607 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
608 			lower_32_bits(adev->uvd.gpu_addr + offset));
609 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
610 			upper_32_bits(adev->uvd.gpu_addr + offset));
611 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
612 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
613 
614 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
615 			lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
616 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
617 			upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
618 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
619 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2),
620 			AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
621 
622 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_ADDR_CONFIG),
623 			adev->gfx.config.gb_addr_config);
624 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG),
625 			adev->gfx.config.gb_addr_config);
626 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG),
627 			adev->gfx.config.gb_addr_config);
628 
629 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
630 }
631 
632 /**
633  * uvd_v7_0_start - start UVD block
634  *
635  * @adev: amdgpu_device pointer
636  *
637  * Setup and start the UVD block
638  */
639 static int uvd_v7_0_start(struct amdgpu_device *adev)
640 {
641 	struct amdgpu_ring *ring = &adev->uvd.ring;
642 	uint32_t rb_bufsz, tmp;
643 	uint32_t lmi_swap_cntl;
644 	uint32_t mp_swap_cntl;
645 	int i, j, r;
646 
647 	/* disable DPG */
648 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
649 			~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
650 
651 	/* disable byte swapping */
652 	lmi_swap_cntl = 0;
653 	mp_swap_cntl = 0;
654 
655 	uvd_v7_0_mc_resume(adev);
656 
657 	/* disable clock gating */
658 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL), 0,
659 			~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);
660 
661 	/* disable interrupt */
662 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
663 			~UVD_MASTINT_EN__VCPU_EN_MASK);
664 
665 	/* stall UMC and register bus before resetting VCPU */
666 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
667 			UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
668 			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
669 	mdelay(1);
670 
671 	/* put LMI, VCPU, RBC etc... into reset */
672 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
673 		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
674 		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
675 		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
676 		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
677 		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
678 		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
679 		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
680 		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
681 	mdelay(5);
682 
683 	/* initialize UVD memory controller */
684 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL),
685 		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
686 		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
687 		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
688 		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
689 		UVD_LMI_CTRL__REQ_MODE_MASK |
690 		0x00100000L);
691 
692 #ifdef __BIG_ENDIAN
693 	/* swap (8 in 32) RB and IB */
694 	lmi_swap_cntl = 0xa;
695 	mp_swap_cntl = 0;
696 #endif
697 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_SWAP_CNTL), lmi_swap_cntl);
698 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MP_SWAP_CNTL), mp_swap_cntl);
699 
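	/*
	 * Program the MPC mux and ALU selects (fixed values, presumably
	 * the recommended defaults for this block).
	 */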
700 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA0), 0x40c2040);
701 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA1), 0x0);
702 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB0), 0x40c2040);
703 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB1), 0x0);
704 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_ALU), 0);
705 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUX), 0x88);
706 
707 	/* take all subblocks out of reset, except VCPU */
708 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
709 			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
710 	mdelay(5);
711 
712 	/* enable VCPU clock */
713 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
714 			UVD_VCPU_CNTL__CLK_EN_MASK);
715 
716 	/* enable UMC */
717 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
718 			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
719 
720 	/* boot up the VCPU */
721 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
722 	mdelay(10);
723 
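	/*
	 * Wait for the VCPU to come up: poll UVD_STATUS for bit 1 and, if
	 * it never sets, toggle the VCPU soft reset and retry, up to 10
	 * attempts in total.
	 */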
724 	for (i = 0; i < 10; ++i) {
725 		uint32_t status;
726 
727 		for (j = 0; j < 100; ++j) {
728 			status = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS));
729 			if (status & 2)
730 				break;
731 			mdelay(10);
732 		}
733 		r = 0;
734 		if (status & 2)
735 			break;
736 
737 		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
738 		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
739 				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
740 				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
741 		mdelay(10);
742 		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
743 				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
744 		mdelay(10);
745 		r = -1;
746 	}
747 
748 	if (r) {
749 		DRM_ERROR("UVD not responding, giving up!!!\n");
750 		return r;
751 	}
752 	/* enable master interrupt */
753 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
754 		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
755 		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
756 
757 	/* clear bit 4 of UVD_STATUS */
758 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
759 			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
760 
761 	/* force RBC into idle state */
762 	rb_bufsz = order_base_2(ring->ring_size);
763 	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
764 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
765 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
766 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
767 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
768 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
769 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp);
770 
771 	/* set the write pointer delay */
772 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL), 0);
773 
774 	/* set the wb address */
775 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR),
776 			(upper_32_bits(ring->gpu_addr) >> 2));
777 
778 	/* program the RB_BASE for the ring buffer */
779 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
780 			lower_32_bits(ring->gpu_addr));
781 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
782 			upper_32_bits(ring->gpu_addr));
783 
784 	/* Initialize the ring buffer's read and write pointers */
785 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR), 0);
786 
787 	ring->wptr = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR));
788 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR),
789 			lower_32_bits(ring->wptr));
790 
791 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
792 			~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
793 
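	/* program base, size and pointers for the two ENC ring buffers */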
794 	ring = &adev->uvd.ring_enc[0];
795 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_RPTR), lower_32_bits(ring->wptr));
796 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR), lower_32_bits(ring->wptr));
797 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr);
798 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
799 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4);
800 
801 	ring = &adev->uvd.ring_enc[1];
802 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_RPTR2), lower_32_bits(ring->wptr));
803 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR2), lower_32_bits(ring->wptr));
804 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO2), ring->gpu_addr);
805 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI2), upper_32_bits(ring->gpu_addr));
806 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE2), ring->ring_size / 4);
807 
808 	return 0;
809 }
810 
811 /**
812  * uvd_v7_0_stop - stop UVD block
813  *
814  * @adev: amdgpu_device pointer
815  *
816  * stop the UVD block
817  */
818 static void uvd_v7_0_stop(struct amdgpu_device *adev)
819 {
820 	/* force RBC into idle state */
821 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0x11010101);
822 
823 	/* Stall UMC and register bus before resetting VCPU */
824 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
825 			UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
826 			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
827 	mdelay(1);
828 
829 	/* put VCPU into reset */
830 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
831 			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
832 	mdelay(5);
833 
834 	/* disable VCPU clock */
835 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0x0);
836 
837 	/* Unstall UMC and register bus */
838 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
839 			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
840 }
841 
842 /**
843  * uvd_v7_0_ring_emit_fence - emit a fence & trap command
844  *
845  * @ring: amdgpu_ring pointer
846  * @addr: fence address; @seq: fence sequence number; @flags: fence flags
847  *
848  * Write a fence and a trap command to the ring.
849  */
850 static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
851 				     unsigned flags)
852 {
853 	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
854 
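	/*
	 * Write the sequence number to UVD_CONTEXT_ID and the fence address
	 * to the GPCOM data registers, then issue the two GPCOM commands
	 * (0, then 2) that emit the fence and the trap described above.
	 */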
855 	amdgpu_ring_write(ring,
856 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
857 	amdgpu_ring_write(ring, seq);
858 	amdgpu_ring_write(ring,
859 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
860 	amdgpu_ring_write(ring, addr & 0xffffffff);
861 	amdgpu_ring_write(ring,
862 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
863 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
864 	amdgpu_ring_write(ring,
865 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
866 	amdgpu_ring_write(ring, 0);
867 
868 	amdgpu_ring_write(ring,
869 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
870 	amdgpu_ring_write(ring, 0);
871 	amdgpu_ring_write(ring,
872 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
873 	amdgpu_ring_write(ring, 0);
874 	amdgpu_ring_write(ring,
875 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
876 	amdgpu_ring_write(ring, 2);
877 }
878 
879 /**
880  * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
881  *
882  * @ring: amdgpu_ring pointer
883  * @addr: fence address; @seq: fence sequence number; @flags: fence flags
884  *
885  * Write an enc fence and a trap command to the ring.
886  */
887 static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
888 			u64 seq, unsigned flags)
889 {
890 	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
891 
892 	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
893 	amdgpu_ring_write(ring, addr);
894 	amdgpu_ring_write(ring, upper_32_bits(addr));
895 	amdgpu_ring_write(ring, seq);
896 	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
897 }
898 
899 /**
900  * uvd_v7_0_ring_emit_hdp_flush - emit an hdp flush
901  *
902  * @ring: amdgpu_ring pointer
903  *
904  * Emits an hdp flush.
905  */
906 static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
907 {
908 	amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(NBIF, 0,
909 		mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0));
910 	amdgpu_ring_write(ring, 0);
911 }
912 
913 /**
914  * uvd_v7_0_ring_emit_hdp_invalidate - emit an hdp invalidate
915  *
916  * @ring: amdgpu_ring pointer
917  *
918  * Emits an hdp invalidate.
919  */
920 static void uvd_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
921 {
922 	amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 0));
923 	amdgpu_ring_write(ring, 1);
924 }
925 
926 /**
927  * uvd_v7_0_ring_test_ring - register write test
928  *
929  * @ring: amdgpu_ring pointer
930  *
931  * Test if we can successfully write to the context register
932  */
933 static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
934 {
935 	struct amdgpu_device *adev = ring->adev;
936 	uint32_t tmp = 0;
937 	unsigned i;
938 	int r;
939 
940 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
941 	r = amdgpu_ring_alloc(ring, 3);
942 	if (r) {
943 		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
944 			  ring->idx, r);
945 		return r;
946 	}
947 	amdgpu_ring_write(ring,
948 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
949 	amdgpu_ring_write(ring, 0xDEADBEEF);
950 	amdgpu_ring_commit(ring);
951 	for (i = 0; i < adev->usec_timeout; i++) {
952 		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
953 		if (tmp == 0xDEADBEEF)
954 			break;
955 		DRM_UDELAY(1);
956 	}
957 
958 	if (i < adev->usec_timeout) {
959 		DRM_INFO("ring test on %d succeeded in %d usecs\n",
960 			 ring->idx, i);
961 	} else {
962 		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
963 			  ring->idx, tmp);
964 		r = -EINVAL;
965 	}
966 	return r;
967 }
968 
969 /**
970  * uvd_v7_0_ring_emit_ib - execute indirect buffer
971  *
972  * @ring: amdgpu_ring pointer
973  * @ib: indirect buffer to execute
974  *
975  * Write ring commands to execute the indirect buffer
976  */
977 static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
978 				  struct amdgpu_ib *ib,
979 				  unsigned vm_id, bool ctx_switch)
980 {
981 	amdgpu_ring_write(ring,
982 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
983 	amdgpu_ring_write(ring, vm_id);
984 
985 	amdgpu_ring_write(ring,
986 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
987 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
988 	amdgpu_ring_write(ring,
989 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
990 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
991 	amdgpu_ring_write(ring,
992 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
993 	amdgpu_ring_write(ring, ib->length_dw);
994 }
995 
996 /**
997  * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
998  *
999  * @ring: amdgpu_ring pointer
1000  * @ib: indirect buffer to execute
1001  *
1002  * Write enc ring commands to execute the indirect buffer
1003  */
1004 static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1005 		struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
1006 {
1007 	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
1008 	amdgpu_ring_write(ring, vm_id);
1009 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1010 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1011 	amdgpu_ring_write(ring, ib->length_dw);
1012 }
1013 
1014 static void uvd_v7_0_vm_reg_write(struct amdgpu_ring *ring,
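/*
 * Helpers for the VM flush below: emit a register write or a masked
 * register wait through the UVD_GPCOM_VCPU_DATA0/1 and UVD_GPCOM_VCPU_CMD
 * registers.
 */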
1015 				uint32_t data0, uint32_t data1)
1016 {
1017 	amdgpu_ring_write(ring,
1018 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1019 	amdgpu_ring_write(ring, data0);
1020 	amdgpu_ring_write(ring,
1021 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1022 	amdgpu_ring_write(ring, data1);
1023 	amdgpu_ring_write(ring,
1024 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1025 	amdgpu_ring_write(ring, 8);
1026 }
1027 
1028 static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring,
1029 				uint32_t data0, uint32_t data1, uint32_t mask)
1030 {
1031 	amdgpu_ring_write(ring,
1032 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1033 	amdgpu_ring_write(ring, data0);
1034 	amdgpu_ring_write(ring,
1035 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1036 	amdgpu_ring_write(ring, data1);
1037 	amdgpu_ring_write(ring,
1038 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
1039 	amdgpu_ring_write(ring, mask);
1040 	amdgpu_ring_write(ring,
1041 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1042 	amdgpu_ring_write(ring, 12);
1043 }
1044 
1045 static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1046 					unsigned vm_id, uint64_t pd_addr)
1047 {
1048 	uint32_t data0, data1, mask;
1049 	unsigned eng = ring->idx;
1050 	unsigned i;
1051 
1052 	pd_addr = pd_addr | 0x1; /* valid bit */
1053 	/* for now only the physical base address of the PDE and the valid bit are used */
1054 	BUG_ON(pd_addr & 0xFFFF00000000003EULL);
1055 
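	/*
	 * For each VM hub: write the page directory address to the CTX0
	 * page table base registers, wait for the low dword to land, then
	 * request a TLB invalidation on this ring's engine and wait for
	 * the corresponding ack bit.
	 */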
1056 	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
1057 		struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
1058 		uint32_t req = hub->get_invalidate_req(vm_id);
1059 
1060 		data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
1061 		data1 = upper_32_bits(pd_addr);
1062 		uvd_v7_0_vm_reg_write(ring, data0, data1);
1063 
1064 		data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
1065 		data1 = lower_32_bits(pd_addr);
1066 		uvd_v7_0_vm_reg_write(ring, data0, data1);
1067 
1068 		data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
1069 		data1 = lower_32_bits(pd_addr);
1070 		mask = 0xffffffff;
1071 		uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
1072 
1073 		/* flush TLB */
1074 		data0 = (hub->vm_inv_eng0_req + eng) << 2;
1075 		data1 = req;
1076 		uvd_v7_0_vm_reg_write(ring, data0, data1);
1077 
1078 		/* wait for flush */
1079 		data0 = (hub->vm_inv_eng0_ack + eng) << 2;
1080 		data1 = 1 << vm_id;
1081 		mask =  1 << vm_id;
1082 		uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
1083 	}
1084 }
1085 
1086 static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1087 {
1088 	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
1089 }
1090 
1091 static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1092 			 unsigned int vm_id, uint64_t pd_addr)
1093 {
1094 	unsigned eng = ring->idx;
1095 	unsigned i;
1096 
1097 	pd_addr = pd_addr | 0x1; /* valid bit */
1098 	/* for now only the physical base address of the PDE and the valid bit are used */
1099 	BUG_ON(pd_addr & 0xFFFF00000000003EULL);
1100 
1101 	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
1102 		struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
1103 		uint32_t req = hub->get_invalidate_req(vm_id);
1104 
1105 		amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1106 		amdgpu_ring_write(ring,
1107 			(hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
1108 		amdgpu_ring_write(ring, upper_32_bits(pd_addr));
1109 
1110 		amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1111 		amdgpu_ring_write(ring,
1112 			(hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
1113 		amdgpu_ring_write(ring, lower_32_bits(pd_addr));
1114 
1115 		amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
1116 		amdgpu_ring_write(ring,
1117 			(hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
1118 		amdgpu_ring_write(ring, 0xffffffff);
1119 		amdgpu_ring_write(ring, lower_32_bits(pd_addr));
1120 
1121 		/* flush TLB */
1122 		amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1123 		amdgpu_ring_write(ring,	(hub->vm_inv_eng0_req + eng) << 2);
1124 		amdgpu_ring_write(ring, req);
1125 
1126 		/* wait for flush */
1127 		amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
1128 		amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
1129 		amdgpu_ring_write(ring, 1 << vm_id);
1130 		amdgpu_ring_write(ring, 1 << vm_id);
1131 	}
1132 }
1133 
1134 #if 0
1135 static bool uvd_v7_0_is_idle(void *handle)
1136 {
1137 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1138 
1139 	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
1140 }
1141 
1142 static int uvd_v7_0_wait_for_idle(void *handle)
1143 {
1144 	unsigned i;
1145 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1146 
1147 	for (i = 0; i < adev->usec_timeout; i++) {
1148 		if (uvd_v7_0_is_idle(handle))
1149 			return 0;
1150 	}
1151 	return -ETIMEDOUT;
1152 }
1153 
1154 #define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
1155 static bool uvd_v7_0_check_soft_reset(void *handle)
1156 {
1157 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1158 	u32 srbm_soft_reset = 0;
1159 	u32 tmp = RREG32(mmSRBM_STATUS);
1160 
1161 	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
1162 	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
1163 	    (RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS) &
1164 		    AMDGPU_UVD_STATUS_BUSY_MASK)))
1165 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1166 				SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
1167 
1168 	if (srbm_soft_reset) {
1169 		adev->uvd.srbm_soft_reset = srbm_soft_reset;
1170 		return true;
1171 	} else {
1172 		adev->uvd.srbm_soft_reset = 0;
1173 		return false;
1174 	}
1175 }
1176 
1177 static int uvd_v7_0_pre_soft_reset(void *handle)
1178 {
1179 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1180 
1181 	if (!adev->uvd.srbm_soft_reset)
1182 		return 0;
1183 
1184 	uvd_v7_0_stop(adev);
1185 	return 0;
1186 }
1187 
1188 static int uvd_v7_0_soft_reset(void *handle)
1189 {
1190 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1191 	u32 srbm_soft_reset;
1192 
1193 	if (!adev->uvd.srbm_soft_reset)
1194 		return 0;
1195 	srbm_soft_reset = adev->uvd.srbm_soft_reset;
1196 
1197 	if (srbm_soft_reset) {
1198 		u32 tmp;
1199 
1200 		tmp = RREG32(mmSRBM_SOFT_RESET);
1201 		tmp |= srbm_soft_reset;
1202 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1203 		WREG32(mmSRBM_SOFT_RESET, tmp);
1204 		tmp = RREG32(mmSRBM_SOFT_RESET);
1205 
1206 		udelay(50);
1207 
1208 		tmp &= ~srbm_soft_reset;
1209 		WREG32(mmSRBM_SOFT_RESET, tmp);
1210 		tmp = RREG32(mmSRBM_SOFT_RESET);
1211 
1212 		/* Wait a little for things to settle down */
1213 		udelay(50);
1214 	}
1215 
1216 	return 0;
1217 }
1218 
1219 static int uvd_v7_0_post_soft_reset(void *handle)
1220 {
1221 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1222 
1223 	if (!adev->uvd.srbm_soft_reset)
1224 		return 0;
1225 
1226 	mdelay(5);
1227 
1228 	return uvd_v7_0_start(adev);
1229 }
1230 #endif
1231 
1232 static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
1233 					struct amdgpu_irq_src *source,
1234 					unsigned type,
1235 					enum amdgpu_interrupt_state state)
1236 {
1237 	/* TODO */
1238 	return 0;
1239 }
1240 
1241 static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
1242 				      struct amdgpu_irq_src *source,
1243 				      struct amdgpu_iv_entry *entry)
1244 {
1245 	DRM_DEBUG("IH: UVD TRAP\n");
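	/*
	 * src_id 124 is the UVD decode trap; 119 and 120 are the ENC
	 * ring 0/1 traps (see uvd_v7_0_sw_init).
	 */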
1246 	switch (entry->src_id) {
1247 	case 124:
1248 		amdgpu_fence_process(&adev->uvd.ring);
1249 		break;
1250 	case 119:
1251 		amdgpu_fence_process(&adev->uvd.ring_enc[0]);
1252 		break;
1253 	case 120:
1254 		amdgpu_fence_process(&adev->uvd.ring_enc[1]);
1255 		break;
1256 	default:
1257 		DRM_ERROR("Unhandled interrupt: %d %d\n",
1258 			  entry->src_id, entry->src_data[0]);
1259 		break;
1260 	}
1261 
1262 	return 0;
1263 }
1264 
1265 #if 0
1266 static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
1267 {
1268 	uint32_t data, data1, data2, suvd_flags;
1269 
1270 	data = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL));
1271 	data1 = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_GATE));
1272 	data2 = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_CTRL));
1273 
1274 	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
1275 		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
1276 
1277 	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1278 		     UVD_SUVD_CGC_GATE__SIT_MASK |
1279 		     UVD_SUVD_CGC_GATE__SMP_MASK |
1280 		     UVD_SUVD_CGC_GATE__SCM_MASK |
1281 		     UVD_SUVD_CGC_GATE__SDB_MASK;
1282 
1283 	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
1284 		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
1285 		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
1286 
1287 	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
1288 			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
1289 			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
1290 			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
1291 			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
1292 			UVD_CGC_CTRL__SYS_MODE_MASK |
1293 			UVD_CGC_CTRL__UDEC_MODE_MASK |
1294 			UVD_CGC_CTRL__MPEG2_MODE_MASK |
1295 			UVD_CGC_CTRL__REGS_MODE_MASK |
1296 			UVD_CGC_CTRL__RBC_MODE_MASK |
1297 			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
1298 			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
1299 			UVD_CGC_CTRL__IDCT_MODE_MASK |
1300 			UVD_CGC_CTRL__MPRD_MODE_MASK |
1301 			UVD_CGC_CTRL__MPC_MODE_MASK |
1302 			UVD_CGC_CTRL__LBSI_MODE_MASK |
1303 			UVD_CGC_CTRL__LRBBM_MODE_MASK |
1304 			UVD_CGC_CTRL__WCB_MODE_MASK |
1305 			UVD_CGC_CTRL__VCPU_MODE_MASK |
1306 			UVD_CGC_CTRL__JPEG_MODE_MASK |
1307 			UVD_CGC_CTRL__JPEG2_MODE_MASK |
1308 			UVD_CGC_CTRL__SCPU_MODE_MASK);
1309 	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
1310 			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
1311 			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
1312 			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
1313 			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
1314 	data1 |= suvd_flags;
1315 
1316 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL), data);
1317 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_GATE), 0);
1318 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_GATE), data1);
1319 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_CTRL), data2);
1320 }
1321 
1322 static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
1323 {
1324 	uint32_t data, data1, cgc_flags, suvd_flags;
1325 
1326 	data = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_GATE));
1327 	data1 = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_GATE));
1328 
1329 	cgc_flags = UVD_CGC_GATE__SYS_MASK |
1330 		UVD_CGC_GATE__UDEC_MASK |
1331 		UVD_CGC_GATE__MPEG2_MASK |
1332 		UVD_CGC_GATE__RBC_MASK |
1333 		UVD_CGC_GATE__LMI_MC_MASK |
1334 		UVD_CGC_GATE__IDCT_MASK |
1335 		UVD_CGC_GATE__MPRD_MASK |
1336 		UVD_CGC_GATE__MPC_MASK |
1337 		UVD_CGC_GATE__LBSI_MASK |
1338 		UVD_CGC_GATE__LRBBM_MASK |
1339 		UVD_CGC_GATE__UDEC_RE_MASK |
1340 		UVD_CGC_GATE__UDEC_CM_MASK |
1341 		UVD_CGC_GATE__UDEC_IT_MASK |
1342 		UVD_CGC_GATE__UDEC_DB_MASK |
1343 		UVD_CGC_GATE__UDEC_MP_MASK |
1344 		UVD_CGC_GATE__WCB_MASK |
1345 		UVD_CGC_GATE__VCPU_MASK |
1346 		UVD_CGC_GATE__SCPU_MASK |
1347 		UVD_CGC_GATE__JPEG_MASK |
1348 		UVD_CGC_GATE__JPEG2_MASK;
1349 
1350 	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1351 				UVD_SUVD_CGC_GATE__SIT_MASK |
1352 				UVD_SUVD_CGC_GATE__SMP_MASK |
1353 				UVD_SUVD_CGC_GATE__SCM_MASK |
1354 				UVD_SUVD_CGC_GATE__SDB_MASK;
1355 
1356 	data |= cgc_flags;
1357 	data1 |= suvd_flags;
1358 
1359 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_GATE), data);
1360 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_GATE), data1);
1361 }
1362 
1363 static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
1364 {
1365 	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
1366 
1367 	if (enable)
1368 		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1369 			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1370 	else
1371 		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1372 			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1373 
1374 	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
1375 }
1376 
1377 
1378 static int uvd_v7_0_set_clockgating_state(void *handle,
1379 					  enum amd_clockgating_state state)
1380 {
1381 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1382 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
1383 
1384 	uvd_v7_0_set_bypass_mode(adev, enable);
1385 
1386 	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
1387 		return 0;
1388 
1389 	if (enable) {
1390 		/* disable HW gating and enable SW gating */
1391 		uvd_v7_0_set_sw_clock_gating(adev);
1392 	} else {
1393 		/* wait for STATUS to clear */
1394 		if (uvd_v7_0_wait_for_idle(handle))
1395 			return -EBUSY;
1396 
1397 		/* enable HW gates because UVD is idle */
1398 		/* uvd_v7_0_set_hw_clock_gating(adev); */
1399 	}
1400 
1401 	return 0;
1402 }
1403 
1404 static int uvd_v7_0_set_powergating_state(void *handle,
1405 					  enum amd_powergating_state state)
1406 {
1407 	/* This doesn't actually powergate the UVD block.
1408 	 * That's done in the dpm code via the SMC.  This
1409 	 * just re-inits the block as necessary.  The actual
1410 	 * gating still happens in the dpm code.  We should
1411 	 * revisit this when there is a cleaner line between
1412 	 * the smc and the hw blocks
1413 	 */
1414 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1415 
1416 	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
1417 		return 0;
1418 
1419 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), UVD_POWER_STATUS__UVD_PG_EN_MASK);
1420 
1421 	if (state == AMD_PG_STATE_GATE) {
1422 		uvd_v7_0_stop(adev);
1423 		return 0;
1424 	} else {
1425 		return uvd_v7_0_start(adev);
1426 	}
1427 }
1428 #endif
1429 
1430 static int uvd_v7_0_set_clockgating_state(void *handle,
1431 					  enum amd_clockgating_state state)
1432 {
1433 	/* needed for driver unload */
1434 	return 0;
1435 }
1436 
1437 const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
1438 	.name = "uvd_v7_0",
1439 	.early_init = uvd_v7_0_early_init,
1440 	.late_init = NULL,
1441 	.sw_init = uvd_v7_0_sw_init,
1442 	.sw_fini = uvd_v7_0_sw_fini,
1443 	.hw_init = uvd_v7_0_hw_init,
1444 	.hw_fini = uvd_v7_0_hw_fini,
1445 	.suspend = uvd_v7_0_suspend,
1446 	.resume = uvd_v7_0_resume,
1447 	.is_idle = NULL /* uvd_v7_0_is_idle */,
1448 	.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
1449 	.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
1450 	.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
1451 	.soft_reset = NULL /* uvd_v7_0_soft_reset */,
1452 	.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
1453 	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
1454 	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
1455 };
1456 
1457 static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
1458 	.type = AMDGPU_RING_TYPE_UVD,
1459 	.align_mask = 0xf,
1460 	.nop = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0),
1461 	.support_64bit_ptrs = false,
1462 	.get_rptr = uvd_v7_0_ring_get_rptr,
1463 	.get_wptr = uvd_v7_0_ring_get_wptr,
1464 	.set_wptr = uvd_v7_0_ring_set_wptr,
1465 	.emit_frame_size =
1466 		2 + /* uvd_v7_0_ring_emit_hdp_flush */
1467 		2 + /* uvd_v7_0_ring_emit_hdp_invalidate */
1468 		34 * AMDGPU_MAX_VMHUBS + /* uvd_v7_0_ring_emit_vm_flush */
1469 		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
1470 	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
1471 	.emit_ib = uvd_v7_0_ring_emit_ib,
1472 	.emit_fence = uvd_v7_0_ring_emit_fence,
1473 	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
1474 	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
1475 	.emit_hdp_invalidate = uvd_v7_0_ring_emit_hdp_invalidate,
1476 	.test_ring = uvd_v7_0_ring_test_ring,
1477 	.test_ib = amdgpu_uvd_ring_test_ib,
1478 	.insert_nop = amdgpu_ring_insert_nop,
1479 	.pad_ib = amdgpu_ring_generic_pad_ib,
1480 	.begin_use = amdgpu_uvd_ring_begin_use,
1481 	.end_use = amdgpu_uvd_ring_end_use,
1482 };
1483 
1484 static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
1485 	.type = AMDGPU_RING_TYPE_UVD_ENC,
1486 	.align_mask = 0x3f,
1487 	.nop = HEVC_ENC_CMD_NO_OP,
1488 	.support_64bit_ptrs = false,
1489 	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
1490 	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
1491 	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
1492 	.emit_frame_size =
1493 		17 * AMDGPU_MAX_VMHUBS + /* uvd_v7_0_enc_ring_emit_vm_flush */
1494 		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
1495 		1, /* uvd_v7_0_enc_ring_insert_end */
1496 	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
1497 	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
1498 	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
1499 	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
1500 	.test_ring = uvd_v7_0_enc_ring_test_ring,
1501 	.test_ib = uvd_v7_0_enc_ring_test_ib,
1502 	.insert_nop = amdgpu_ring_insert_nop,
1503 	.insert_end = uvd_v7_0_enc_ring_insert_end,
1504 	.pad_ib = amdgpu_ring_generic_pad_ib,
1505 	.begin_use = amdgpu_uvd_ring_begin_use,
1506 	.end_use = amdgpu_uvd_ring_end_use,
1507 };
1508 
1509 static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
1510 {
1511 	adev->uvd.ring.funcs = &uvd_v7_0_ring_vm_funcs;
1512 	DRM_INFO("UVD is enabled in VM mode\n");
1513 }
1514 
1515 static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
1516 {
1517 	int i;
1518 
1519 	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
1520 		adev->uvd.ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
1521 
1522 	DRM_INFO("UVD ENC is enabled in VM mode\n");
1523 }
1524 
1525 static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
1526 	.set = uvd_v7_0_set_interrupt_state,
1527 	.process = uvd_v7_0_process_interrupt,
1528 };
1529 
1530 static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
1531 {
1532 	adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1;
1533 	adev->uvd.irq.funcs = &uvd_v7_0_irq_funcs;
1534 }
1535 
1536 const struct amdgpu_ip_block_version uvd_v7_0_ip_block =
1537 {
1538 		.type = AMD_IP_BLOCK_TYPE_UVD,
1539 		.major = 7,
1540 		.minor = 0,
1541 		.rev = 0,
1542 		.funcs = &uvd_v7_0_ip_funcs,
1543 };
1544