/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "vid.h"
#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10

#define VCE_V3_0_FW_SIZE	(384 * 1024)
#define VCE_V3_0_STACK_SIZE	(64 * 1024)
#define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))

static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vce_v3_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_RPTR);
	else
		return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v3_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_WPTR);
	else
		return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v3_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		WREG32(mmVCE_RB_WPTR, ring->wptr);
	else
		WREG32(mmVCE_RB_WPTR2, ring->wptr);
}

/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int idx, i, j, r;

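	/* VCE 3.0 has two instances; the GRBM_GFX_INDEX VCE_INSTANCE bit
	 * selects which instance the register writes below reach, so each
	 * instance is brought up in turn.
	 */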
	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (idx == 0)
			WREG32_P(mmGRBM_GFX_INDEX, 0,
				 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
		else
			WREG32_P(mmGRBM_GFX_INDEX,
				 GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
				 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);

		vce_v3_0_mc_resume(adev, idx);

		/* set BUSY flag */
		WREG32_P(mmVCE_STATUS, 1, ~1);

		WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
			 ~VCE_VCPU_CNTL__CLK_EN_MASK);

		WREG32_P(mmVCE_SOFT_RESET,
			 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

		mdelay(100);

		WREG32_P(mmVCE_SOFT_RESET, 0,
			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

		for (i = 0; i < 10; ++i) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32(mmVCE_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
			WREG32_P(mmVCE_SOFT_RESET,
				 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
				 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
			mdelay(10);
			WREG32_P(mmVCE_SOFT_RESET, 0,
				 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
			mdelay(10);
			r = -1;
		}

		/* clear BUSY flag */
		WREG32_P(mmVCE_STATUS, 0, ~1);

		if (r) {
			DRM_ERROR("VCE not responding, giving up!!!\n");
			mutex_unlock(&adev->grbm_idx_mutex);
			return r;
		}
	}

	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
	mutex_unlock(&adev->grbm_idx_mutex);

	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, ring->wptr);
	WREG32(mmVCE_RB_WPTR, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, ring->wptr);
	WREG32(mmVCE_RB_WPTR2, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	return 0;
}

static int vce_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vce_v3_0_set_ring_funcs(adev);
	vce_v3_0_set_irq_funcs(adev);

	return 0;
}

static int vce_v3_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r;

	/* VCE */
	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
			       (VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
	if (r)
		return r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	ring = &adev->vce.ring[0];
	sprintf(ring->name, "vce0");
	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	ring = &adev->vce.ring[1];
	sprintf(ring->name, "vce1");
	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	return r;
}

static int vce_v3_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vce_sw_fini(adev);
	if (r)
		return r;

	return r;
}

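/**
 * vce_v3_0_hw_init - start VCE block and test the rings
 *
 * @handle: amdgpu_device pointer
 *
 * Starts the VCE block and verifies that both rings respond
 */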
static int vce_v3_0_hw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_start(adev);
	if (r)
		return r;

	ring = &adev->vce.ring[0];
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	ring = &adev->vce.ring[1];
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

static int vce_v3_0_hw_fini(void *handle)
{
	return 0;
}

static int vce_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return r;
}

static int vce_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	r = vce_v3_0_hw_init(adev);
	if (r)
		return r;

	return r;
}

/**
 * vce_v3_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @idx: VCE instance index
 *
 * Tell the VCE memory controller where the firmware, stack and data
 * segments for this instance live
 */
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);

	WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	if (idx == 0) {
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);

	WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
		 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
}

static bool vce_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}

static int vce_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

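/* assert the SRBM soft reset for the VCE block, then re-run the full
 * start sequence to bring it back up
 */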
static int vce_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK);
	mdelay(5);

	return vce_v3_0_start(adev);
}

static void vce_v3_0_print_status(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "VCE 3.0 registers\n");
	dev_info(adev->dev, " VCE_STATUS=0x%08X\n",
		 RREG32(mmVCE_STATUS));
	dev_info(adev->dev, " VCE_VCPU_CNTL=0x%08X\n",
		 RREG32(mmVCE_VCPU_CNTL));
	dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET0));
	dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE0=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE0));
	dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET1));
	dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE1=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE1));
	dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET2));
	dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE2=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE2));
	dev_info(adev->dev, " VCE_SOFT_RESET=0x%08X\n",
		 RREG32(mmVCE_SOFT_RESET));
	dev_info(adev->dev, " VCE_RB_BASE_LO2=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_LO2));
	dev_info(adev->dev, " VCE_RB_BASE_HI2=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_HI2));
	dev_info(adev->dev, " VCE_RB_SIZE2=0x%08X\n",
		 RREG32(mmVCE_RB_SIZE2));
	dev_info(adev->dev, " VCE_RB_RPTR2=0x%08X\n",
		 RREG32(mmVCE_RB_RPTR2));
	dev_info(adev->dev, " VCE_RB_WPTR2=0x%08X\n",
		 RREG32(mmVCE_RB_WPTR2));
	dev_info(adev->dev, " VCE_RB_BASE_LO=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_LO));
	dev_info(adev->dev, " VCE_RB_BASE_HI=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_HI));
	dev_info(adev->dev, " VCE_RB_SIZE=0x%08X\n",
		 RREG32(mmVCE_RB_SIZE));
	dev_info(adev->dev, " VCE_RB_RPTR=0x%08X\n",
		 RREG32(mmVCE_RB_RPTR));
	dev_info(adev->dev, " VCE_RB_WPTR=0x%08X\n",
		 RREG32(mmVCE_RB_WPTR));
	dev_info(adev->dev, " VCE_CLOCK_GATING_A=0x%08X\n",
		 RREG32(mmVCE_CLOCK_GATING_A));
	dev_info(adev->dev, " VCE_CLOCK_GATING_B=0x%08X\n",
		 RREG32(mmVCE_CLOCK_GATING_B));
	dev_info(adev->dev, " VCE_UENC_CLOCK_GATING=0x%08X\n",
		 RREG32(mmVCE_UENC_CLOCK_GATING));
	dev_info(adev->dev, " VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
		 RREG32(mmVCE_UENC_REG_CLOCK_GATING));
	dev_info(adev->dev, " VCE_SYS_INT_EN=0x%08X\n",
		 RREG32(mmVCE_SYS_INT_EN));
	dev_info(adev->dev, " VCE_LMI_CTRL2=0x%08X\n",
		 RREG32(mmVCE_LMI_CTRL2));
	dev_info(adev->dev, " VCE_LMI_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_CTRL));
	dev_info(adev->dev, " VCE_LMI_VM_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_VM_CTRL));
	dev_info(adev->dev, " VCE_LMI_SWAP_CNTL=0x%08X\n",
		 RREG32(mmVCE_LMI_SWAP_CNTL));
	dev_info(adev->dev, " VCE_LMI_SWAP_CNTL1=0x%08X\n",
		 RREG32(mmVCE_LMI_SWAP_CNTL1));
	dev_info(adev->dev, " VCE_LMI_CACHE_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_CACHE_CTRL));
}

static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

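/* entry->src_data carries the index of the ring that raised the interrupt */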
static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");
	switch (entry->src_data) {
	case 0:
		amdgpu_fence_process(&adev->vce.ring[0]);
		break;
	case 1:
		amdgpu_fence_process(&adev->vce.ring[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data);
		break;
	}

	return 0;
}

static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int vce_v3_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		/* XXX do we need a vce_v3_0_stop()? */
		return 0;
	else
		return vce_v3_0_start(adev);
}

const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.soft_reset = vce_v3_0_soft_reset,
	.print_status = vce_v3_0_print_status,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.emit_semaphore = amdgpu_vce_ring_emit_semaphore,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.is_lockup = amdgpu_ring_test_lockup,
};

static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs;
	adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};

static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
}