1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #include <linux/firmware.h>
25
26 #include "amdgpu.h"
27 #include "amdgpu_uvd.h"
28 #include "amdgpu_cs.h"
29 #include "soc15.h"
30 #include "soc15d.h"
31 #include "soc15_common.h"
32 #include "mmsch_v1_0.h"
33
34 #include "uvd/uvd_7_0_offset.h"
35 #include "uvd/uvd_7_0_sh_mask.h"
36 #include "vce/vce_4_0_offset.h"
37 #include "vce/vce_4_0_default.h"
38 #include "vce/vce_4_0_sh_mask.h"
39 #include "nbif/nbif_6_1_offset.h"
40 #include "mmhub/mmhub_1_0_offset.h"
41 #include "mmhub/mmhub_1_0_sh_mask.h"
42 #include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
43
44 #define mmUVD_PG0_CC_UVD_HARVESTING 0x00c7
45 #define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX 1
46 //UVD_PG0_CC_UVD_HARVESTING
47 #define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT 0x1
48 #define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK 0x00000002L
49
50 #define UVD7_MAX_HW_INSTANCES_VEGA20 2
51
52 static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
53 static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
54 static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
55 static int uvd_v7_0_start(struct amdgpu_device *adev);
56 static void uvd_v7_0_stop(struct amdgpu_device *adev);
57 static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);
58
59 static int amdgpu_ih_clientid_uvds[] = {
60 SOC15_IH_CLIENTID_UVD,
61 SOC15_IH_CLIENTID_UVD1
62 };
63
64 /**
65 * uvd_v7_0_ring_get_rptr - get read pointer
66 *
67 * @ring: amdgpu_ring pointer
68 *
69 * Returns the current hardware read pointer
70 */
uvd_v7_0_ring_get_rptr(struct amdgpu_ring * ring)71 static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
72 {
73 struct amdgpu_device *adev = ring->adev;
74
75 return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
76 }
77
78 /**
79 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
80 *
81 * @ring: amdgpu_ring pointer
82 *
83 * Returns the current hardware enc read pointer
84 */
uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring * ring)85 static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
86 {
87 struct amdgpu_device *adev = ring->adev;
88
89 if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
90 return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
91 else
92 return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
93 }
94
95 /**
96 * uvd_v7_0_ring_get_wptr - get write pointer
97 *
98 * @ring: amdgpu_ring pointer
99 *
100 * Returns the current hardware write pointer
101 */
uvd_v7_0_ring_get_wptr(struct amdgpu_ring * ring)102 static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
103 {
104 struct amdgpu_device *adev = ring->adev;
105
106 return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
107 }
108
109 /**
110 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
111 *
112 * @ring: amdgpu_ring pointer
113 *
114 * Returns the current hardware enc write pointer
115 */
uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring * ring)116 static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
117 {
118 struct amdgpu_device *adev = ring->adev;
119
120 if (ring->use_doorbell)
121 return *ring->wptr_cpu_addr;
122
123 if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
124 return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
125 else
126 return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
127 }
128
129 /**
130 * uvd_v7_0_ring_set_wptr - set write pointer
131 *
132 * @ring: amdgpu_ring pointer
133 *
134 * Commits the write pointer to the hardware
135 */
uvd_v7_0_ring_set_wptr(struct amdgpu_ring * ring)136 static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
137 {
138 struct amdgpu_device *adev = ring->adev;
139
140 WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
141 }
142
143 /**
144 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
145 *
146 * @ring: amdgpu_ring pointer
147 *
148 * Commits the enc write pointer to the hardware
149 */
uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring * ring)150 static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
151 {
152 struct amdgpu_device *adev = ring->adev;
153
154 if (ring->use_doorbell) {
155 /* XXX check if swapping is necessary on BE */
156 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
157 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
158 return;
159 }
160
161 if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
162 WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
163 lower_32_bits(ring->wptr));
164 else
165 WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
166 lower_32_bits(ring->wptr));
167 }
168
169 /**
170 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
171 *
172 * @ring: the engine to test on
173 *
174 */
uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring * ring)175 static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
176 {
177 struct amdgpu_device *adev = ring->adev;
178 uint32_t rptr;
179 unsigned i;
180 int r;
181
182 if (amdgpu_sriov_vf(adev))
183 return 0;
184
185 r = amdgpu_ring_alloc(ring, 16);
186 if (r)
187 return r;
188
189 rptr = amdgpu_ring_get_rptr(ring);
190
191 amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
192 amdgpu_ring_commit(ring);
193
194 for (i = 0; i < adev->usec_timeout; i++) {
195 if (amdgpu_ring_get_rptr(ring) != rptr)
196 break;
197 udelay(1);
198 }
199
200 if (i >= adev->usec_timeout)
201 r = -ETIMEDOUT;
202
203 return r;
204 }
205
206 /**
207 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
208 *
209 * @ring: ring we should submit the msg to
210 * @handle: session handle to use
211 * @bo: amdgpu object for which we query the offset
212 * @fence: optional fence to return
213 *
214 * Open up a stream for HW test
215 */
uvd_v7_0_enc_get_create_msg(struct amdgpu_ring * ring,u32 handle,struct amdgpu_bo * bo,struct dma_fence ** fence)216 static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle,
217 struct amdgpu_bo *bo,
218 struct dma_fence **fence)
219 {
220 const unsigned ib_size_dw = 16;
221 struct amdgpu_job *job;
222 struct amdgpu_ib *ib;
223 struct dma_fence *f = NULL;
224 uint64_t addr;
225 int i, r;
226
227 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
228 AMDGPU_IB_POOL_DIRECT, &job);
229 if (r)
230 return r;
231
232 ib = &job->ibs[0];
233 addr = amdgpu_bo_gpu_offset(bo);
234
235 ib->length_dw = 0;
236 ib->ptr[ib->length_dw++] = 0x00000018;
237 ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
238 ib->ptr[ib->length_dw++] = handle;
239 ib->ptr[ib->length_dw++] = 0x00000000;
240 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
241 ib->ptr[ib->length_dw++] = addr;
242
243 ib->ptr[ib->length_dw++] = 0x00000014;
244 ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
245 ib->ptr[ib->length_dw++] = 0x0000001c;
246 ib->ptr[ib->length_dw++] = 0x00000000;
247 ib->ptr[ib->length_dw++] = 0x00000000;
248
249 ib->ptr[ib->length_dw++] = 0x00000008;
250 ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
251
252 for (i = ib->length_dw; i < ib_size_dw; ++i)
253 ib->ptr[i] = 0x0;
254
255 r = amdgpu_job_submit_direct(job, ring, &f);
256 if (r)
257 goto err;
258
259 if (fence)
260 *fence = dma_fence_get(f);
261 dma_fence_put(f);
262 return 0;
263
264 err:
265 amdgpu_job_free(job);
266 return r;
267 }
268
269 /**
270 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
271 *
272 * @ring: ring we should submit the msg to
273 * @handle: session handle to use
274 * @bo: amdgpu object for which we query the offset
275 * @fence: optional fence to return
276 *
277 * Close up a stream for HW test or if userspace failed to do so
278 */
uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring * ring,u32 handle,struct amdgpu_bo * bo,struct dma_fence ** fence)279 static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle,
280 struct amdgpu_bo *bo,
281 struct dma_fence **fence)
282 {
283 const unsigned ib_size_dw = 16;
284 struct amdgpu_job *job;
285 struct amdgpu_ib *ib;
286 struct dma_fence *f = NULL;
287 uint64_t addr;
288 int i, r;
289
290 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
291 AMDGPU_IB_POOL_DIRECT, &job);
292 if (r)
293 return r;
294
295 ib = &job->ibs[0];
296 addr = amdgpu_bo_gpu_offset(bo);
297
298 ib->length_dw = 0;
299 ib->ptr[ib->length_dw++] = 0x00000018;
300 ib->ptr[ib->length_dw++] = 0x00000001;
301 ib->ptr[ib->length_dw++] = handle;
302 ib->ptr[ib->length_dw++] = 0x00000000;
303 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
304 ib->ptr[ib->length_dw++] = addr;
305
306 ib->ptr[ib->length_dw++] = 0x00000014;
307 ib->ptr[ib->length_dw++] = 0x00000002;
308 ib->ptr[ib->length_dw++] = 0x0000001c;
309 ib->ptr[ib->length_dw++] = 0x00000000;
310 ib->ptr[ib->length_dw++] = 0x00000000;
311
312 ib->ptr[ib->length_dw++] = 0x00000008;
313 ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
314
315 for (i = ib->length_dw; i < ib_size_dw; ++i)
316 ib->ptr[i] = 0x0;
317
318 r = amdgpu_job_submit_direct(job, ring, &f);
319 if (r)
320 goto err;
321
322 if (fence)
323 *fence = dma_fence_get(f);
324 dma_fence_put(f);
325 return 0;
326
327 err:
328 amdgpu_job_free(job);
329 return r;
330 }
331
332 /**
333 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
334 *
335 * @ring: the engine to test on
336 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
337 *
338 */
uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring * ring,long timeout)339 static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
340 {
341 struct dma_fence *fence = NULL;
342 struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
343 long r;
344
345 r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
346 if (r)
347 goto error;
348
349 r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
350 if (r)
351 goto error;
352
353 r = dma_fence_wait_timeout(fence, false, timeout);
354 if (r == 0)
355 r = -ETIMEDOUT;
356 else if (r > 0)
357 r = 0;
358
359 error:
360 dma_fence_put(fence);
361 return r;
362 }
363
uvd_v7_0_early_init(void * handle)364 static int uvd_v7_0_early_init(void *handle)
365 {
366 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
367
368 if (adev->asic_type == CHIP_VEGA20) {
369 u32 harvest;
370 int i;
371
372 adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
373 for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
374 harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
375 if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
376 adev->uvd.harvest_config |= 1 << i;
377 }
378 }
379 if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
380 AMDGPU_UVD_HARVEST_UVD1))
381 /* both instances are harvested, disable the block */
382 return -ENOENT;
383 } else {
384 adev->uvd.num_uvd_inst = 1;
385 }
386
387 if (amdgpu_sriov_vf(adev))
388 adev->uvd.num_enc_rings = 1;
389 else
390 adev->uvd.num_enc_rings = 2;
391 uvd_v7_0_set_ring_funcs(adev);
392 uvd_v7_0_set_enc_ring_funcs(adev);
393 uvd_v7_0_set_irq_funcs(adev);
394
395 return 0;
396 }
397
uvd_v7_0_sw_init(void * handle)398 static int uvd_v7_0_sw_init(void *handle)
399 {
400 struct amdgpu_ring *ring;
401
402 int i, j, r;
403 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
404
405 for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
406 if (adev->uvd.harvest_config & (1 << j))
407 continue;
408 /* UVD TRAP */
409 r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
410 if (r)
411 return r;
412
413 /* UVD ENC TRAP */
414 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
415 r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
416 if (r)
417 return r;
418 }
419 }
420
421 r = amdgpu_uvd_sw_init(adev);
422 if (r)
423 return r;
424
425 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
426 const struct common_firmware_header *hdr;
427 hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
428 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
429 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
430 adev->firmware.fw_size +=
431 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
432
433 if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
434 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
435 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
436 adev->firmware.fw_size +=
437 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
438 }
439 DRM_INFO("PSP loading UVD firmware\n");
440 }
441
442 for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
443 if (adev->uvd.harvest_config & (1 << j))
444 continue;
445 if (!amdgpu_sriov_vf(adev)) {
446 ring = &adev->uvd.inst[j].ring;
447 ring->vm_hub = AMDGPU_MMHUB0(0);
448 sprintf(ring->name, "uvd_%d", ring->me);
449 r = amdgpu_ring_init(adev, ring, 512,
450 &adev->uvd.inst[j].irq, 0,
451 AMDGPU_RING_PRIO_DEFAULT, NULL);
452 if (r)
453 return r;
454 }
455
456 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
457 ring = &adev->uvd.inst[j].ring_enc[i];
458 ring->vm_hub = AMDGPU_MMHUB0(0);
459 sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
460 if (amdgpu_sriov_vf(adev)) {
461 ring->use_doorbell = true;
462
463 /* currently only use the first enconding ring for
464 * sriov, so set unused location for other unused rings.
465 */
466 if (i == 0)
467 ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
468 else
469 ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
470 }
471 r = amdgpu_ring_init(adev, ring, 512,
472 &adev->uvd.inst[j].irq, 0,
473 AMDGPU_RING_PRIO_DEFAULT, NULL);
474 if (r)
475 return r;
476 }
477 }
478
479 r = amdgpu_uvd_resume(adev);
480 if (r)
481 return r;
482
483 r = amdgpu_virt_alloc_mm_table(adev);
484 if (r)
485 return r;
486
487 return r;
488 }
489
uvd_v7_0_sw_fini(void * handle)490 static int uvd_v7_0_sw_fini(void *handle)
491 {
492 int i, j, r;
493 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
494
495 amdgpu_virt_free_mm_table(adev);
496
497 r = amdgpu_uvd_suspend(adev);
498 if (r)
499 return r;
500
501 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
502 if (adev->uvd.harvest_config & (1 << j))
503 continue;
504 for (i = 0; i < adev->uvd.num_enc_rings; ++i)
505 amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
506 }
507 return amdgpu_uvd_sw_fini(adev);
508 }
509
510 /**
511 * uvd_v7_0_hw_init - start and test UVD block
512 *
513 * @handle: handle used to pass amdgpu_device pointer
514 *
515 * Initialize the hardware, boot up the VCPU and do some testing
516 */
uvd_v7_0_hw_init(void * handle)517 static int uvd_v7_0_hw_init(void *handle)
518 {
519 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
520 struct amdgpu_ring *ring;
521 uint32_t tmp;
522 int i, j, r;
523
524 if (amdgpu_sriov_vf(adev))
525 r = uvd_v7_0_sriov_start(adev);
526 else
527 r = uvd_v7_0_start(adev);
528 if (r)
529 goto done;
530
531 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
532 if (adev->uvd.harvest_config & (1 << j))
533 continue;
534 ring = &adev->uvd.inst[j].ring;
535
536 if (!amdgpu_sriov_vf(adev)) {
537 r = amdgpu_ring_test_helper(ring);
538 if (r)
539 goto done;
540
541 r = amdgpu_ring_alloc(ring, 10);
542 if (r) {
543 DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r);
544 goto done;
545 }
546
547 tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
548 mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
549 amdgpu_ring_write(ring, tmp);
550 amdgpu_ring_write(ring, 0xFFFFF);
551
552 tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
553 mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
554 amdgpu_ring_write(ring, tmp);
555 amdgpu_ring_write(ring, 0xFFFFF);
556
557 tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
558 mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
559 amdgpu_ring_write(ring, tmp);
560 amdgpu_ring_write(ring, 0xFFFFF);
561
562 /* Clear timeout status bits */
563 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
564 mmUVD_SEMA_TIMEOUT_STATUS), 0));
565 amdgpu_ring_write(ring, 0x8);
566
567 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
568 mmUVD_SEMA_CNTL), 0));
569 amdgpu_ring_write(ring, 3);
570
571 amdgpu_ring_commit(ring);
572 }
573
574 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
575 ring = &adev->uvd.inst[j].ring_enc[i];
576 r = amdgpu_ring_test_helper(ring);
577 if (r)
578 goto done;
579 }
580 }
581 done:
582 if (!r)
583 DRM_INFO("UVD and UVD ENC initialized successfully.\n");
584
585 return r;
586 }
587
588 /**
589 * uvd_v7_0_hw_fini - stop the hardware block
590 *
591 * @handle: handle used to pass amdgpu_device pointer
592 *
593 * Stop the UVD block, mark ring as not ready any more
594 */
uvd_v7_0_hw_fini(void * handle)595 static int uvd_v7_0_hw_fini(void *handle)
596 {
597 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
598
599 cancel_delayed_work_sync(&adev->uvd.idle_work);
600
601 if (!amdgpu_sriov_vf(adev))
602 uvd_v7_0_stop(adev);
603 else {
604 /* full access mode, so don't touch any UVD register */
605 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
606 }
607
608 return 0;
609 }
610
uvd_v7_0_prepare_suspend(void * handle)611 static int uvd_v7_0_prepare_suspend(void *handle)
612 {
613 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
614
615 return amdgpu_uvd_prepare_suspend(adev);
616 }
617
uvd_v7_0_suspend(void * handle)618 static int uvd_v7_0_suspend(void *handle)
619 {
620 int r;
621 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
622
623 /*
624 * Proper cleanups before halting the HW engine:
625 * - cancel the delayed idle work
626 * - enable powergating
627 * - enable clockgating
628 * - disable dpm
629 *
630 * TODO: to align with the VCN implementation, move the
631 * jobs for clockgating/powergating/dpm setting to
632 * ->set_powergating_state().
633 */
634 cancel_delayed_work_sync(&adev->uvd.idle_work);
635
636 if (adev->pm.dpm_enabled) {
637 amdgpu_dpm_enable_uvd(adev, false);
638 } else {
639 amdgpu_asic_set_uvd_clocks(adev, 0, 0);
640 /* shutdown the UVD block */
641 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
642 AMD_PG_STATE_GATE);
643 amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
644 AMD_CG_STATE_GATE);
645 }
646
647 r = uvd_v7_0_hw_fini(adev);
648 if (r)
649 return r;
650
651 return amdgpu_uvd_suspend(adev);
652 }
653
uvd_v7_0_resume(void * handle)654 static int uvd_v7_0_resume(void *handle)
655 {
656 int r;
657 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
658
659 r = amdgpu_uvd_resume(adev);
660 if (r)
661 return r;
662
663 return uvd_v7_0_hw_init(adev);
664 }
665
666 /**
667 * uvd_v7_0_mc_resume - memory controller programming
668 *
669 * @adev: amdgpu_device pointer
670 *
671 * Let the UVD memory controller know it's offsets
672 */
uvd_v7_0_mc_resume(struct amdgpu_device * adev)673 static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
674 {
675 uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
676 uint32_t offset;
677 int i;
678
679 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
680 if (adev->uvd.harvest_config & (1 << i))
681 continue;
682 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
683 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
684 i == 0 ?
685 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo :
686 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
687 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
688 i == 0 ?
689 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi :
690 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
691 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
692 offset = 0;
693 } else {
694 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
695 lower_32_bits(adev->uvd.inst[i].gpu_addr));
696 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
697 upper_32_bits(adev->uvd.inst[i].gpu_addr));
698 offset = size;
699 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
700 AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
701 }
702
703 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
704
705 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
706 lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
707 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
708 upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
709 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
710 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);
711
712 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
713 lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
714 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
715 upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
716 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
717 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
718 AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
719
720 WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
721 adev->gfx.config.gb_addr_config);
722 WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
723 adev->gfx.config.gb_addr_config);
724 WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
725 adev->gfx.config.gb_addr_config);
726
727 WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
728 }
729 }
730
uvd_v7_0_mmsch_start(struct amdgpu_device * adev,struct amdgpu_mm_table * table)731 static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
732 struct amdgpu_mm_table *table)
733 {
734 uint32_t data = 0, loop;
735 uint64_t addr = table->gpu_addr;
736 struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
737 uint32_t size;
738 int i;
739
740 size = header->header_size + header->vce_table_size + header->uvd_table_size;
741
742 /* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
743 WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
744 WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
745
746 /* 2, update vmid of descriptor */
747 data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
748 data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
749 data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
750 WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);
751
752 /* 3, notify mmsch about the size of this descriptor */
753 WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);
754
755 /* 4, set resp to zero */
756 WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
757
758 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
759 if (adev->uvd.harvest_config & (1 << i))
760 continue;
761 WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
762 *adev->uvd.inst[i].ring_enc[0].wptr_cpu_addr = 0;
763 adev->uvd.inst[i].ring_enc[0].wptr = 0;
764 adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
765 }
766 /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
767 WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
768
769 data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
770 loop = 1000;
771 while ((data & 0x10000002) != 0x10000002) {
772 udelay(10);
773 data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
774 loop--;
775 if (!loop)
776 break;
777 }
778
779 if (!loop) {
780 dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
781 return -EBUSY;
782 }
783
784 return 0;
785 }
786
uvd_v7_0_sriov_start(struct amdgpu_device * adev)787 static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
788 {
789 struct amdgpu_ring *ring;
790 uint32_t offset, size, tmp;
791 uint32_t table_size = 0;
792 struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
793 struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
794 struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
795 struct mmsch_v1_0_cmd_end end = { {0} };
796 uint32_t *init_table = adev->virt.mm_table.cpu_addr;
797 struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
798 uint8_t i = 0;
799
800 direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
801 direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
802 direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
803 end.cmd_header.command_type = MMSCH_COMMAND__END;
804
805 if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
806 header->version = MMSCH_VERSION;
807 header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;
808
809 if (header->vce_table_offset == 0 && header->vce_table_size == 0)
810 header->uvd_table_offset = header->header_size;
811 else
812 header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;
813
814 init_table += header->uvd_table_offset;
815
816 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
817 if (adev->uvd.harvest_config & (1 << i))
818 continue;
819 ring = &adev->uvd.inst[i].ring;
820 ring->wptr = 0;
821 size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
822
823 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
824 0xFFFFFFFF, 0x00000004);
825 /* mc resume*/
826 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
827 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
828 mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
829 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
830 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
831 mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
832 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
833 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
834 offset = 0;
835 } else {
836 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
837 lower_32_bits(adev->uvd.inst[i].gpu_addr));
838 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
839 upper_32_bits(adev->uvd.inst[i].gpu_addr));
840 offset = size;
841 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
842 AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
843
844 }
845
846 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);
847
848 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
849 lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
850 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
851 upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
852 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
853 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
854
855 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
856 lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
857 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
858 upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
859 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
860 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
861 AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
862
863 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
864 /* mc resume end*/
865
866 /* disable clock gating */
867 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
868 ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);
869
870 /* disable interupt */
871 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
872 ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);
873
874 /* stall UMC and register bus before resetting VCPU */
875 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
876 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
877 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
878
879 /* put LMI, VCPU, RBC etc... into reset */
880 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
881 (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
882 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
883 UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
884 UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
885 UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
886 UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
887 UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
888 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));
889
890 /* initialize UVD memory controller */
891 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
892 (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
893 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
894 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
895 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
896 UVD_LMI_CTRL__REQ_MODE_MASK |
897 0x00100000L));
898
899 /* take all subblocks out of reset, except VCPU */
900 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
901 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
902
903 /* enable VCPU clock */
904 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
905 UVD_VCPU_CNTL__CLK_EN_MASK);
906
907 /* enable master interrupt */
908 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
909 ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
910 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
911
912 /* clear the bit 4 of UVD_STATUS */
913 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
914 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);
915
916 /* force RBC into idle state */
917 size = order_base_2(ring->ring_size);
918 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
919 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
920 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
921
922 ring = &adev->uvd.inst[i].ring_enc[0];
923 ring->wptr = 0;
924 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
925 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
926 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);
927
928 /* boot up the VCPU */
929 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);
930
931 /* enable UMC */
932 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
933 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
934
935 MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
936 }
937 /* add end packet */
938 memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
939 table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
940 header->uvd_table_size = table_size;
941
942 }
943 return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
944 }
945
946 /**
947 * uvd_v7_0_start - start UVD block
948 *
949 * @adev: amdgpu_device pointer
950 *
951 * Setup and start the UVD block
952 */
uvd_v7_0_start(struct amdgpu_device * adev)953 static int uvd_v7_0_start(struct amdgpu_device *adev)
954 {
955 struct amdgpu_ring *ring;
956 uint32_t rb_bufsz, tmp;
957 uint32_t lmi_swap_cntl;
958 uint32_t mp_swap_cntl;
959 int i, j, k, r;
960
961 for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
962 if (adev->uvd.harvest_config & (1 << k))
963 continue;
964 /* disable DPG */
965 WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
966 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
967 }
968
969 /* disable byte swapping */
970 lmi_swap_cntl = 0;
971 mp_swap_cntl = 0;
972
973 uvd_v7_0_mc_resume(adev);
974
975 for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
976 if (adev->uvd.harvest_config & (1 << k))
977 continue;
978 ring = &adev->uvd.inst[k].ring;
979 /* disable clock gating */
980 WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
981 ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);
982
983 /* disable interupt */
984 WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
985 ~UVD_MASTINT_EN__VCPU_EN_MASK);
986
987 /* stall UMC and register bus before resetting VCPU */
988 WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
989 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
990 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
991 mdelay(1);
992
993 /* put LMI, VCPU, RBC etc... into reset */
994 WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
995 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
996 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
997 UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
998 UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
999 UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
1000 UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
1001 UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
1002 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
1003 mdelay(5);
1004
1005 /* initialize UVD memory controller */
1006 WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
1007 (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
1008 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
1009 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
1010 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
1011 UVD_LMI_CTRL__REQ_MODE_MASK |
1012 0x00100000L);
1013
1014 #ifdef __BIG_ENDIAN
1015 /* swap (8 in 32) RB and IB */
1016 lmi_swap_cntl = 0xa;
1017 mp_swap_cntl = 0;
1018 #endif
1019 WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
1020 WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
1021
1022 WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
1023 WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
1024 WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
1025 WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
1026 WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
1027 WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);
1028
1029 /* take all subblocks out of reset, except VCPU */
1030 WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
1031 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1032 mdelay(5);
1033
1034 /* enable VCPU clock */
1035 WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
1036 UVD_VCPU_CNTL__CLK_EN_MASK);
1037
1038 /* enable UMC */
1039 WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
1040 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1041
1042 /* boot up the VCPU */
1043 WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
1044 mdelay(10);
1045
1046 for (i = 0; i < 10; ++i) {
1047 uint32_t status;
1048
1049 for (j = 0; j < 100; ++j) {
1050 status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
1051 if (status & 2)
1052 break;
1053 mdelay(10);
1054 }
1055 r = 0;
1056 if (status & 2)
1057 break;
1058
1059 DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
1060 WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
1061 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
1062 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1063 mdelay(10);
1064 WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
1065 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1066 mdelay(10);
1067 r = -1;
1068 }
1069
1070 if (r) {
1071 DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
1072 return r;
1073 }
1074 /* enable master interrupt */
1075 WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
1076 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
1077 ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
1078
1079 /* clear the bit 4 of UVD_STATUS */
1080 WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
1081 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
1082
1083 /* force RBC into idle state */
1084 rb_bufsz = order_base_2(ring->ring_size);
1085 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1086 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1087 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1088 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
1089 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1090 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1091 WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);
1092
1093 /* set the write pointer delay */
1094 WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);
1095
1096 /* set the wb address */
1097 WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
1098 (upper_32_bits(ring->gpu_addr) >> 2));
1099
1100 /* program the RB_BASE for ring buffer */
1101 WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1102 lower_32_bits(ring->gpu_addr));
1103 WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1104 upper_32_bits(ring->gpu_addr));
1105
1106 /* Initialize the ring buffer's read and write pointers */
1107 WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);
1108
1109 ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
1110 WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
1111 lower_32_bits(ring->wptr));
1112
1113 WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
1114 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
1115
1116 ring = &adev->uvd.inst[k].ring_enc[0];
1117 WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1118 WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1119 WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
1120 WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1121 WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);
1122
1123 ring = &adev->uvd.inst[k].ring_enc[1];
1124 WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1125 WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1126 WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1127 WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1128 WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
1129 }
1130 return 0;
1131 }
1132
1133 /**
1134 * uvd_v7_0_stop - stop UVD block
1135 *
1136 * @adev: amdgpu_device pointer
1137 *
1138 * stop the UVD block
1139 */
uvd_v7_0_stop(struct amdgpu_device * adev)1140 static void uvd_v7_0_stop(struct amdgpu_device *adev)
1141 {
1142 uint8_t i = 0;
1143
1144 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
1145 if (adev->uvd.harvest_config & (1 << i))
1146 continue;
1147 /* force RBC into idle state */
1148 WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
1149
1150 /* Stall UMC and register bus before resetting VCPU */
1151 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
1152 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
1153 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1154 mdelay(1);
1155
1156 /* put VCPU into reset */
1157 WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
1158 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1159 mdelay(5);
1160
1161 /* disable VCPU clock */
1162 WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);
1163
1164 /* Unstall UMC and register bus */
1165 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
1166 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1167 }
1168 }
1169
1170 /**
1171 * uvd_v7_0_ring_emit_fence - emit an fence & trap command
1172 *
1173 * @ring: amdgpu_ring pointer
1174 * @addr: address
1175 * @seq: sequence number
1176 * @flags: fence related flags
1177 *
1178 * Write a fence and a trap command to the ring.
1179 */
uvd_v7_0_ring_emit_fence(struct amdgpu_ring * ring,u64 addr,u64 seq,unsigned flags)1180 static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1181 unsigned flags)
1182 {
1183 struct amdgpu_device *adev = ring->adev;
1184
1185 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1186
1187 amdgpu_ring_write(ring,
1188 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
1189 amdgpu_ring_write(ring, seq);
1190 amdgpu_ring_write(ring,
1191 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1192 amdgpu_ring_write(ring, addr & 0xffffffff);
1193 amdgpu_ring_write(ring,
1194 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1195 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
1196 amdgpu_ring_write(ring,
1197 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1198 amdgpu_ring_write(ring, 0);
1199
1200 amdgpu_ring_write(ring,
1201 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1202 amdgpu_ring_write(ring, 0);
1203 amdgpu_ring_write(ring,
1204 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1205 amdgpu_ring_write(ring, 0);
1206 amdgpu_ring_write(ring,
1207 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1208 amdgpu_ring_write(ring, 2);
1209 }
1210
1211 /**
1212 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
1213 *
1214 * @ring: amdgpu_ring pointer
1215 * @addr: address
1216 * @seq: sequence number
1217 * @flags: fence related flags
1218 *
1219 * Write enc a fence and a trap command to the ring.
1220 */
uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring * ring,u64 addr,u64 seq,unsigned flags)1221 static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1222 u64 seq, unsigned flags)
1223 {
1224
1225 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1226
1227 amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
1228 amdgpu_ring_write(ring, addr);
1229 amdgpu_ring_write(ring, upper_32_bits(addr));
1230 amdgpu_ring_write(ring, seq);
1231 amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
1232 }
1233
1234 /**
1235 * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
1236 *
1237 * @ring: amdgpu_ring pointer
1238 */
uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring * ring)1239 static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
1240 {
1241 /* The firmware doesn't seem to like touching registers at this point. */
1242 }
1243
1244 /**
1245 * uvd_v7_0_ring_test_ring - register write test
1246 *
1247 * @ring: amdgpu_ring pointer
1248 *
1249 * Test if we can successfully write to the context register
1250 */
uvd_v7_0_ring_test_ring(struct amdgpu_ring * ring)1251 static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
1252 {
1253 struct amdgpu_device *adev = ring->adev;
1254 uint32_t tmp = 0;
1255 unsigned i;
1256 int r;
1257
1258 WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
1259 r = amdgpu_ring_alloc(ring, 3);
1260 if (r)
1261 return r;
1262
1263 amdgpu_ring_write(ring,
1264 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
1265 amdgpu_ring_write(ring, 0xDEADBEEF);
1266 amdgpu_ring_commit(ring);
1267 for (i = 0; i < adev->usec_timeout; i++) {
1268 tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
1269 if (tmp == 0xDEADBEEF)
1270 break;
1271 udelay(1);
1272 }
1273
1274 if (i >= adev->usec_timeout)
1275 r = -ETIMEDOUT;
1276
1277 return r;
1278 }
1279
1280 /**
1281 * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
1282 *
1283 * @p: the CS parser with the IBs
1284 * @job: which job this ib is in
1285 * @ib: which IB to patch
1286 *
1287 */
uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser * p,struct amdgpu_job * job,struct amdgpu_ib * ib)1288 static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
1289 struct amdgpu_job *job,
1290 struct amdgpu_ib *ib)
1291 {
1292 struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
1293 unsigned i;
1294
1295 /* No patching necessary for the first instance */
1296 if (!ring->me)
1297 return 0;
1298
1299 for (i = 0; i < ib->length_dw; i += 2) {
1300 uint32_t reg = amdgpu_ib_get_value(ib, i);
1301
1302 reg -= p->adev->reg_offset[UVD_HWIP][0][1];
1303 reg += p->adev->reg_offset[UVD_HWIP][1][1];
1304
1305 amdgpu_ib_set_value(ib, i, reg);
1306 }
1307 return 0;
1308 }
1309
1310 /**
1311 * uvd_v7_0_ring_emit_ib - execute indirect buffer
1312 *
1313 * @ring: amdgpu_ring pointer
1314 * @job: job to retrieve vmid from
1315 * @ib: indirect buffer to execute
1316 * @flags: unused
1317 *
1318 * Write ring commands to execute the indirect buffer
1319 */
uvd_v7_0_ring_emit_ib(struct amdgpu_ring * ring,struct amdgpu_job * job,struct amdgpu_ib * ib,uint32_t flags)1320 static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
1321 struct amdgpu_job *job,
1322 struct amdgpu_ib *ib,
1323 uint32_t flags)
1324 {
1325 struct amdgpu_device *adev = ring->adev;
1326 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1327
1328 amdgpu_ring_write(ring,
1329 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
1330 amdgpu_ring_write(ring, vmid);
1331
1332 amdgpu_ring_write(ring,
1333 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
1334 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1335 amdgpu_ring_write(ring,
1336 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
1337 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1338 amdgpu_ring_write(ring,
1339 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
1340 amdgpu_ring_write(ring, ib->length_dw);
1341 }
1342
1343 /**
1344 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
1345 *
1346 * @ring: amdgpu_ring pointer
1347 * @job: job to retrive vmid from
1348 * @ib: indirect buffer to execute
1349 * @flags: unused
1350 *
1351 * Write enc ring commands to execute the indirect buffer
1352 */
uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring * ring,struct amdgpu_job * job,struct amdgpu_ib * ib,uint32_t flags)1353 static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1354 struct amdgpu_job *job,
1355 struct amdgpu_ib *ib,
1356 uint32_t flags)
1357 {
1358 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1359
1360 amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
1361 amdgpu_ring_write(ring, vmid);
1362 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1363 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1364 amdgpu_ring_write(ring, ib->length_dw);
1365 }
1366
uvd_v7_0_ring_emit_wreg(struct amdgpu_ring * ring,uint32_t reg,uint32_t val)1367 static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
1368 uint32_t reg, uint32_t val)
1369 {
1370 struct amdgpu_device *adev = ring->adev;
1371
1372 amdgpu_ring_write(ring,
1373 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1374 amdgpu_ring_write(ring, reg << 2);
1375 amdgpu_ring_write(ring,
1376 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1377 amdgpu_ring_write(ring, val);
1378 amdgpu_ring_write(ring,
1379 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1380 amdgpu_ring_write(ring, 8);
1381 }
1382
uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring * ring,uint32_t reg,uint32_t val,uint32_t mask)1383 static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1384 uint32_t val, uint32_t mask)
1385 {
1386 struct amdgpu_device *adev = ring->adev;
1387
1388 amdgpu_ring_write(ring,
1389 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1390 amdgpu_ring_write(ring, reg << 2);
1391 amdgpu_ring_write(ring,
1392 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1393 amdgpu_ring_write(ring, val);
1394 amdgpu_ring_write(ring,
1395 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
1396 amdgpu_ring_write(ring, mask);
1397 amdgpu_ring_write(ring,
1398 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1399 amdgpu_ring_write(ring, 12);
1400 }
1401
uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring * ring,unsigned vmid,uint64_t pd_addr)1402 static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1403 unsigned vmid, uint64_t pd_addr)
1404 {
1405 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
1406 uint32_t data0, data1, mask;
1407
1408 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1409
1410 /* wait for reg writes */
1411 data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
1412 data1 = lower_32_bits(pd_addr);
1413 mask = 0xffffffff;
1414 uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
1415 }
1416
uvd_v7_0_ring_insert_nop(struct amdgpu_ring * ring,uint32_t count)1417 static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
1418 {
1419 struct amdgpu_device *adev = ring->adev;
1420 int i;
1421
1422 WARN_ON(ring->wptr % 2 || count % 2);
1423
1424 for (i = 0; i < count / 2; i++) {
1425 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
1426 amdgpu_ring_write(ring, 0);
1427 }
1428 }
1429
uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring * ring)1430 static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1431 {
1432 amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
1433 }
1434
uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring * ring,uint32_t reg,uint32_t val,uint32_t mask)1435 static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
1436 uint32_t reg, uint32_t val,
1437 uint32_t mask)
1438 {
1439 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
1440 amdgpu_ring_write(ring, reg << 2);
1441 amdgpu_ring_write(ring, mask);
1442 amdgpu_ring_write(ring, val);
1443 }
1444
uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring * ring,unsigned int vmid,uint64_t pd_addr)1445 static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1446 unsigned int vmid, uint64_t pd_addr)
1447 {
1448 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
1449
1450 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1451
1452 /* wait for reg writes */
1453 uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
1454 vmid * hub->ctx_addr_distance,
1455 lower_32_bits(pd_addr), 0xffffffff);
1456 }
1457
uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring * ring,uint32_t reg,uint32_t val)1458 static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
1459 uint32_t reg, uint32_t val)
1460 {
1461 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1462 amdgpu_ring_write(ring, reg << 2);
1463 amdgpu_ring_write(ring, val);
1464 }
1465
1466 #if 0
1467 static bool uvd_v7_0_is_idle(void *handle)
1468 {
1469 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1470
1471 return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
1472 }
1473
1474 static int uvd_v7_0_wait_for_idle(void *handle)
1475 {
1476 unsigned i;
1477 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1478
1479 for (i = 0; i < adev->usec_timeout; i++) {
1480 if (uvd_v7_0_is_idle(handle))
1481 return 0;
1482 }
1483 return -ETIMEDOUT;
1484 }
1485
1486 #define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
1487 static bool uvd_v7_0_check_soft_reset(void *handle)
1488 {
1489 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1490 u32 srbm_soft_reset = 0;
1491 u32 tmp = RREG32(mmSRBM_STATUS);
1492
1493 if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
1494 REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
1495 (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
1496 AMDGPU_UVD_STATUS_BUSY_MASK))
1497 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1498 SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
1499
1500 if (srbm_soft_reset) {
1501 adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
1502 return true;
1503 } else {
1504 adev->uvd.inst[ring->me].srbm_soft_reset = 0;
1505 return false;
1506 }
1507 }
1508
1509 static int uvd_v7_0_pre_soft_reset(void *handle)
1510 {
1511 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1512
1513 if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1514 return 0;
1515
1516 uvd_v7_0_stop(adev);
1517 return 0;
1518 }
1519
1520 static int uvd_v7_0_soft_reset(void *handle)
1521 {
1522 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1523 u32 srbm_soft_reset;
1524
1525 if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1526 return 0;
1527 srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;
1528
1529 if (srbm_soft_reset) {
1530 u32 tmp;
1531
1532 tmp = RREG32(mmSRBM_SOFT_RESET);
1533 tmp |= srbm_soft_reset;
1534 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1535 WREG32(mmSRBM_SOFT_RESET, tmp);
1536 tmp = RREG32(mmSRBM_SOFT_RESET);
1537
1538 udelay(50);
1539
1540 tmp &= ~srbm_soft_reset;
1541 WREG32(mmSRBM_SOFT_RESET, tmp);
1542 tmp = RREG32(mmSRBM_SOFT_RESET);
1543
1544 /* Wait a little for things to settle down */
1545 udelay(50);
1546 }
1547
1548 return 0;
1549 }
1550
1551 static int uvd_v7_0_post_soft_reset(void *handle)
1552 {
1553 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1554
1555 if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1556 return 0;
1557
1558 mdelay(5);
1559
1560 return uvd_v7_0_start(adev);
1561 }
1562 #endif
1563
uvd_v7_0_set_interrupt_state(struct amdgpu_device * adev,struct amdgpu_irq_src * source,unsigned type,enum amdgpu_interrupt_state state)1564 static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
1565 struct amdgpu_irq_src *source,
1566 unsigned type,
1567 enum amdgpu_interrupt_state state)
1568 {
1569 // TODO
1570 return 0;
1571 }
1572
uvd_v7_0_process_interrupt(struct amdgpu_device * adev,struct amdgpu_irq_src * source,struct amdgpu_iv_entry * entry)1573 static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
1574 struct amdgpu_irq_src *source,
1575 struct amdgpu_iv_entry *entry)
1576 {
1577 uint32_t ip_instance;
1578
1579 switch (entry->client_id) {
1580 case SOC15_IH_CLIENTID_UVD:
1581 ip_instance = 0;
1582 break;
1583 case SOC15_IH_CLIENTID_UVD1:
1584 ip_instance = 1;
1585 break;
1586 default:
1587 DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
1588 return 0;
1589 }
1590
1591 DRM_DEBUG("IH: UVD TRAP\n");
1592
1593 switch (entry->src_id) {
1594 case 124:
1595 amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
1596 break;
1597 case 119:
1598 amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
1599 break;
1600 case 120:
1601 if (!amdgpu_sriov_vf(adev))
1602 amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
1603 break;
1604 default:
1605 DRM_ERROR("Unhandled interrupt: %d %d\n",
1606 entry->src_id, entry->src_data[0]);
1607 break;
1608 }
1609
1610 return 0;
1611 }
1612
1613 #if 0
1614 static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
1615 {
1616 uint32_t data, data1, data2, suvd_flags;
1617
1618 data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
1619 data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
1620 data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);
1621
1622 data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
1623 UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
1624
1625 suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1626 UVD_SUVD_CGC_GATE__SIT_MASK |
1627 UVD_SUVD_CGC_GATE__SMP_MASK |
1628 UVD_SUVD_CGC_GATE__SCM_MASK |
1629 UVD_SUVD_CGC_GATE__SDB_MASK;
1630
1631 data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
1632 (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
1633 (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
1634
1635 data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
1636 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
1637 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
1638 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
1639 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
1640 UVD_CGC_CTRL__SYS_MODE_MASK |
1641 UVD_CGC_CTRL__UDEC_MODE_MASK |
1642 UVD_CGC_CTRL__MPEG2_MODE_MASK |
1643 UVD_CGC_CTRL__REGS_MODE_MASK |
1644 UVD_CGC_CTRL__RBC_MODE_MASK |
1645 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
1646 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
1647 UVD_CGC_CTRL__IDCT_MODE_MASK |
1648 UVD_CGC_CTRL__MPRD_MODE_MASK |
1649 UVD_CGC_CTRL__MPC_MODE_MASK |
1650 UVD_CGC_CTRL__LBSI_MODE_MASK |
1651 UVD_CGC_CTRL__LRBBM_MODE_MASK |
1652 UVD_CGC_CTRL__WCB_MODE_MASK |
1653 UVD_CGC_CTRL__VCPU_MODE_MASK |
1654 UVD_CGC_CTRL__JPEG_MODE_MASK |
1655 UVD_CGC_CTRL__JPEG2_MODE_MASK |
1656 UVD_CGC_CTRL__SCPU_MODE_MASK);
1657 data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
1658 UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
1659 UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
1660 UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
1661 UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
1662 data1 |= suvd_flags;
1663
1664 WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
1665 WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
1666 WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
1667 WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
1668 }
1669
1670 static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
1671 {
1672 uint32_t data, data1, cgc_flags, suvd_flags;
1673
1674 data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
1675 data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
1676
1677 cgc_flags = UVD_CGC_GATE__SYS_MASK |
1678 UVD_CGC_GATE__UDEC_MASK |
1679 UVD_CGC_GATE__MPEG2_MASK |
1680 UVD_CGC_GATE__RBC_MASK |
1681 UVD_CGC_GATE__LMI_MC_MASK |
1682 UVD_CGC_GATE__IDCT_MASK |
1683 UVD_CGC_GATE__MPRD_MASK |
1684 UVD_CGC_GATE__MPC_MASK |
1685 UVD_CGC_GATE__LBSI_MASK |
1686 UVD_CGC_GATE__LRBBM_MASK |
1687 UVD_CGC_GATE__UDEC_RE_MASK |
1688 UVD_CGC_GATE__UDEC_CM_MASK |
1689 UVD_CGC_GATE__UDEC_IT_MASK |
1690 UVD_CGC_GATE__UDEC_DB_MASK |
1691 UVD_CGC_GATE__UDEC_MP_MASK |
1692 UVD_CGC_GATE__WCB_MASK |
1693 UVD_CGC_GATE__VCPU_MASK |
1694 UVD_CGC_GATE__SCPU_MASK |
1695 UVD_CGC_GATE__JPEG_MASK |
1696 UVD_CGC_GATE__JPEG2_MASK;
1697
1698 suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1699 UVD_SUVD_CGC_GATE__SIT_MASK |
1700 UVD_SUVD_CGC_GATE__SMP_MASK |
1701 UVD_SUVD_CGC_GATE__SCM_MASK |
1702 UVD_SUVD_CGC_GATE__SDB_MASK;
1703
1704 data |= cgc_flags;
1705 data1 |= suvd_flags;
1706
1707 WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
1708 WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
1709 }
1710
1711 static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
1712 {
1713 u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
1714
1715 if (enable)
1716 tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1717 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1718 else
1719 tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1720 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1721
1722 WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
1723 }
1724
1725
1726 static int uvd_v7_0_set_clockgating_state(void *handle,
1727 enum amd_clockgating_state state)
1728 {
1729 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1730 bool enable = (state == AMD_CG_STATE_GATE);
1731
1732 uvd_v7_0_set_bypass_mode(adev, enable);
1733
1734 if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
1735 return 0;
1736
1737 if (enable) {
1738 /* disable HW gating and enable Sw gating */
1739 uvd_v7_0_set_sw_clock_gating(adev);
1740 } else {
1741 /* wait for STATUS to clear */
1742 if (uvd_v7_0_wait_for_idle(handle))
1743 return -EBUSY;
1744
1745 /* enable HW gates because UVD is idle */
1746 /* uvd_v7_0_set_hw_clock_gating(adev); */
1747 }
1748
1749 return 0;
1750 }
1751
1752 static int uvd_v7_0_set_powergating_state(void *handle,
1753 enum amd_powergating_state state)
1754 {
1755 /* This doesn't actually powergate the UVD block.
1756 * That's done in the dpm code via the SMC. This
1757 * just re-inits the block as necessary. The actual
1758 * gating still happens in the dpm code. We should
1759 * revisit this when there is a cleaner line between
1760 * the smc and the hw blocks
1761 */
1762 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1763
1764 if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
1765 return 0;
1766
1767 WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
1768
1769 if (state == AMD_PG_STATE_GATE) {
1770 uvd_v7_0_stop(adev);
1771 return 0;
1772 } else {
1773 return uvd_v7_0_start(adev);
1774 }
1775 }
1776 #endif
1777
uvd_v7_0_set_clockgating_state(void * handle,enum amd_clockgating_state state)1778 static int uvd_v7_0_set_clockgating_state(void *handle,
1779 enum amd_clockgating_state state)
1780 {
1781 /* needed for driver unload*/
1782 return 0;
1783 }
1784
1785 const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
1786 .name = "uvd_v7_0",
1787 .early_init = uvd_v7_0_early_init,
1788 .late_init = NULL,
1789 .sw_init = uvd_v7_0_sw_init,
1790 .sw_fini = uvd_v7_0_sw_fini,
1791 .hw_init = uvd_v7_0_hw_init,
1792 .hw_fini = uvd_v7_0_hw_fini,
1793 .prepare_suspend = uvd_v7_0_prepare_suspend,
1794 .suspend = uvd_v7_0_suspend,
1795 .resume = uvd_v7_0_resume,
1796 .is_idle = NULL /* uvd_v7_0_is_idle */,
1797 .wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
1798 .check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
1799 .pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
1800 .soft_reset = NULL /* uvd_v7_0_soft_reset */,
1801 .post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
1802 .set_clockgating_state = uvd_v7_0_set_clockgating_state,
1803 .set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
1804 };
1805
1806 static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
1807 .type = AMDGPU_RING_TYPE_UVD,
1808 .align_mask = 0xf,
1809 .support_64bit_ptrs = false,
1810 .no_user_fence = true,
1811 .get_rptr = uvd_v7_0_ring_get_rptr,
1812 .get_wptr = uvd_v7_0_ring_get_wptr,
1813 .set_wptr = uvd_v7_0_ring_set_wptr,
1814 .patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
1815 .emit_frame_size =
1816 6 + /* hdp invalidate */
1817 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1818 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1819 8 + /* uvd_v7_0_ring_emit_vm_flush */
1820 14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
1821 .emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
1822 .emit_ib = uvd_v7_0_ring_emit_ib,
1823 .emit_fence = uvd_v7_0_ring_emit_fence,
1824 .emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
1825 .emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
1826 .test_ring = uvd_v7_0_ring_test_ring,
1827 .test_ib = amdgpu_uvd_ring_test_ib,
1828 .insert_nop = uvd_v7_0_ring_insert_nop,
1829 .pad_ib = amdgpu_ring_generic_pad_ib,
1830 .begin_use = amdgpu_uvd_ring_begin_use,
1831 .end_use = amdgpu_uvd_ring_end_use,
1832 .emit_wreg = uvd_v7_0_ring_emit_wreg,
1833 .emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
1834 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1835 };
1836
1837 static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
1838 .type = AMDGPU_RING_TYPE_UVD_ENC,
1839 .align_mask = 0x3f,
1840 .nop = HEVC_ENC_CMD_NO_OP,
1841 .support_64bit_ptrs = false,
1842 .no_user_fence = true,
1843 .get_rptr = uvd_v7_0_enc_ring_get_rptr,
1844 .get_wptr = uvd_v7_0_enc_ring_get_wptr,
1845 .set_wptr = uvd_v7_0_enc_ring_set_wptr,
1846 .emit_frame_size =
1847 3 + 3 + /* hdp flush / invalidate */
1848 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1849 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1850 4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
1851 5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
1852 1, /* uvd_v7_0_enc_ring_insert_end */
1853 .emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
1854 .emit_ib = uvd_v7_0_enc_ring_emit_ib,
1855 .emit_fence = uvd_v7_0_enc_ring_emit_fence,
1856 .emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
1857 .test_ring = uvd_v7_0_enc_ring_test_ring,
1858 .test_ib = uvd_v7_0_enc_ring_test_ib,
1859 .insert_nop = amdgpu_ring_insert_nop,
1860 .insert_end = uvd_v7_0_enc_ring_insert_end,
1861 .pad_ib = amdgpu_ring_generic_pad_ib,
1862 .begin_use = amdgpu_uvd_ring_begin_use,
1863 .end_use = amdgpu_uvd_ring_end_use,
1864 .emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
1865 .emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
1866 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1867 };
1868
uvd_v7_0_set_ring_funcs(struct amdgpu_device * adev)1869 static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
1870 {
1871 int i;
1872
1873 for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
1874 if (adev->uvd.harvest_config & (1 << i))
1875 continue;
1876 adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
1877 adev->uvd.inst[i].ring.me = i;
1878 DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
1879 }
1880 }
1881
uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device * adev)1882 static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
1883 {
1884 int i, j;
1885
1886 for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
1887 if (adev->uvd.harvest_config & (1 << j))
1888 continue;
1889 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
1890 adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
1891 adev->uvd.inst[j].ring_enc[i].me = j;
1892 }
1893
1894 DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
1895 }
1896 }
1897
1898 static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
1899 .set = uvd_v7_0_set_interrupt_state,
1900 .process = uvd_v7_0_process_interrupt,
1901 };
1902
uvd_v7_0_set_irq_funcs(struct amdgpu_device * adev)1903 static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
1904 {
1905 int i;
1906
1907 for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
1908 if (adev->uvd.harvest_config & (1 << i))
1909 continue;
1910 adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
1911 adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
1912 }
1913 }
1914
1915 const struct amdgpu_ip_block_version uvd_v7_0_ip_block = {
1916 .type = AMD_IP_BLOCK_TYPE_UVD,
1917 .major = 7,
1918 .minor = 0,
1919 .rev = 0,
1920 .funcs = &uvd_v7_0_ip_funcs,
1921 };
1922