/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "vid.h"
#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10

#define VCE_V3_0_FW_SIZE	(384 * 1024)
#define VCE_V3_0_STACK_SIZE	(64 * 1024)
#define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))

static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vce_v3_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_RPTR);
	else
		return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v3_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_WPTR);
	else
		return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v3_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		WREG32(mmVCE_RB_WPTR, ring->wptr);
	else
		WREG32(mmVCE_RB_WPTR2, ring->wptr);
}

/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int idx, i, j, r;

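	/* Program each VCE instance that is not fused off: select it through
	 * GRBM_GFX_INDEX, restore its memory controller setup, release the
	 * ECPU from soft reset, then wait for VCE_STATUS to show that the
	 * firmware is responding before moving on.
	 */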
	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		if (idx == 0)
			WREG32_P(mmGRBM_GFX_INDEX, 0,
				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
		else
			WREG32_P(mmGRBM_GFX_INDEX,
				GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);

		vce_v3_0_mc_resume(adev, idx);

		/* set BUSY flag */
		WREG32_P(mmVCE_STATUS, 1, ~1);

		WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
			~VCE_VCPU_CNTL__CLK_EN_MASK);

		WREG32_P(mmVCE_SOFT_RESET,
			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

		mdelay(100);

		WREG32_P(mmVCE_SOFT_RESET, 0,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

		for (i = 0; i < 10; ++i) {
			uint32_t status;
			for (j = 0; j < 100; ++j) {
				status = RREG32(mmVCE_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
			WREG32_P(mmVCE_SOFT_RESET,
				VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
			mdelay(10);
			WREG32_P(mmVCE_SOFT_RESET, 0,
				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
			mdelay(10);
			r = -1;
		}

		/* clear BUSY flag */
		WREG32_P(mmVCE_STATUS, 0, ~1);

		if (r) {
			DRM_ERROR("VCE not responding, giving up!!!\n");
			mutex_unlock(&adev->grbm_idx_mutex);
			return r;
		}
	}

	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
	mutex_unlock(&adev->grbm_idx_mutex);

	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, ring->wptr);
	WREG32(mmVCE_RB_WPTR, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, ring->wptr);
	WREG32(mmVCE_RB_WPTR2, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	return 0;
}

#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS	0xC0014074
#define VCE_HARVEST_FUSE_MACRO__SHIFT		27
#define VCE_HARVEST_FUSE_MACRO__MASK		0x18000000

static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
	u32 tmp;
	unsigned ret;

	/* Fiji is single pipe */
	if (adev->asic_type == CHIP_FIJI) {
		ret = AMDGPU_VCE_HARVEST_VCE1;
		return ret;
	}

	/* Tonga and CZ are dual or single pipe */
	if (adev->flags & AMD_IS_APU)
		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
		       VCE_HARVEST_FUSE_MACRO__MASK) >>
			VCE_HARVEST_FUSE_MACRO__SHIFT;
	else
		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;

	switch (tmp) {
	case 1:
		ret = AMDGPU_VCE_HARVEST_VCE0;
		break;
	case 2:
		ret = AMDGPU_VCE_HARVEST_VCE1;
		break;
	case 3:
		ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
		break;
	default:
		ret = 0;
	}

	return ret;
}

static int vce_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);

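	/* If the fuses mark both instances as harvested there is no usable
	 * VCE block on this chip, so opt out of the IP block entirely.
	 */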
	if ((adev->vce.harvest_config &
	     (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
		return -ENOENT;

	vce_v3_0_set_ring_funcs(adev);
	vce_v3_0_set_irq_funcs(adev);

	return 0;
}

static int vce_v3_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r;

	/* VCE */
	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
	if (r)
		return r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	ring = &adev->vce.ring[0];
	sprintf(ring->name, "vce0");
	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	ring = &adev->vce.ring[1];
	sprintf(ring->name, "vce1");
	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	return r;
}

static int vce_v3_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vce_sw_fini(adev);
	if (r)
		return r;

	return r;
}

static int vce_v3_0_hw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_start(adev);
	if (r)
		return r;

	ring = &adev->vce.ring[0];
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	ring = &adev->vce.ring[1];
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

static int vce_v3_0_hw_fini(void *handle)
{
	return 0;
}

static int vce_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return r;
}

static int vce_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	r = vce_v3_0_hw_init(adev);
	if (r)
		return r;

	return r;
}

static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);

	WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

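	/* The firmware image is shared; each instance then gets its own
	 * stack and data region behind it, with instance 1 placed after
	 * instance 0's stack/data area.
	 */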
	if (idx == 0) {
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);

	WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
		 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
}

static bool vce_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;
	int idx;

	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		if (idx == 0)
			mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
		else
			mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
	}

	return !(RREG32(mmSRBM_STATUS2) & mask);
}

static int vce_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;
	int idx;

	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		if (idx == 0)
			mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
		else
			mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS2) & mask))
			return 0;
	}
	return -ETIMEDOUT;
}

static int vce_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;
	int idx;

	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		if (idx == 0)
			mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
		else
			mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;
	}
	WREG32_P(mmSRBM_SOFT_RESET, mask,
		 ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
		   SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK));
	mdelay(5);

	return vce_v3_0_start(adev);
}

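/* Dump the current VCE 3.0 register state to the kernel log for debugging. */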
static void vce_v3_0_print_status(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "VCE 3.0 registers\n");
	dev_info(adev->dev, " VCE_STATUS=0x%08X\n",
		 RREG32(mmVCE_STATUS));
	dev_info(adev->dev, " VCE_VCPU_CNTL=0x%08X\n",
		 RREG32(mmVCE_VCPU_CNTL));
	dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET0));
	dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE0=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE0));
	dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET1));
	dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE1=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE1));
	dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET2));
	dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE2=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE2));
	dev_info(adev->dev, " VCE_SOFT_RESET=0x%08X\n",
		 RREG32(mmVCE_SOFT_RESET));
	dev_info(adev->dev, " VCE_RB_BASE_LO2=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_LO2));
	dev_info(adev->dev, " VCE_RB_BASE_HI2=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_HI2));
	dev_info(adev->dev, " VCE_RB_SIZE2=0x%08X\n",
		 RREG32(mmVCE_RB_SIZE2));
	dev_info(adev->dev, " VCE_RB_RPTR2=0x%08X\n",
		 RREG32(mmVCE_RB_RPTR2));
	dev_info(adev->dev, " VCE_RB_WPTR2=0x%08X\n",
		 RREG32(mmVCE_RB_WPTR2));
	dev_info(adev->dev, " VCE_RB_BASE_LO=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_LO));
	dev_info(adev->dev, " VCE_RB_BASE_HI=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_HI));
	dev_info(adev->dev, " VCE_RB_SIZE=0x%08X\n",
		 RREG32(mmVCE_RB_SIZE));
	dev_info(adev->dev, " VCE_RB_RPTR=0x%08X\n",
		 RREG32(mmVCE_RB_RPTR));
	dev_info(adev->dev, " VCE_RB_WPTR=0x%08X\n",
		 RREG32(mmVCE_RB_WPTR));
	dev_info(adev->dev, " VCE_CLOCK_GATING_A=0x%08X\n",
		 RREG32(mmVCE_CLOCK_GATING_A));
	dev_info(adev->dev, " VCE_CLOCK_GATING_B=0x%08X\n",
		 RREG32(mmVCE_CLOCK_GATING_B));
	dev_info(adev->dev, " VCE_UENC_CLOCK_GATING=0x%08X\n",
		 RREG32(mmVCE_UENC_CLOCK_GATING));
	dev_info(adev->dev, " VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
		 RREG32(mmVCE_UENC_REG_CLOCK_GATING));
	dev_info(adev->dev, " VCE_SYS_INT_EN=0x%08X\n",
		 RREG32(mmVCE_SYS_INT_EN));
	dev_info(adev->dev, " VCE_LMI_CTRL2=0x%08X\n",
		 RREG32(mmVCE_LMI_CTRL2));
	dev_info(adev->dev, " VCE_LMI_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_CTRL));
	dev_info(adev->dev, " VCE_LMI_VM_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_VM_CTRL));
	dev_info(adev->dev, " VCE_LMI_SWAP_CNTL=0x%08X\n",
		 RREG32(mmVCE_LMI_SWAP_CNTL));
	dev_info(adev->dev, " VCE_LMI_SWAP_CNTL1=0x%08X\n",
		 RREG32(mmVCE_LMI_SWAP_CNTL1));
	dev_info(adev->dev, " VCE_LMI_CACHE_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_CACHE_CTRL));
}

static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");
	switch (entry->src_data) {
	case 0:
		amdgpu_fence_process(&adev->vce.ring[0]);
		break;
	case 1:
		amdgpu_fence_process(&adev->vce.ring[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data);
		break;
	}

	return 0;
}

static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int vce_v3_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

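	/* Nothing to do here when gating; on ungate, restart the block so it
	 * is usable again once the SMC has removed power gating.
	 */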
	if (state == AMD_PG_STATE_GATE)
		/* XXX do we need a vce_v3_0_stop()? */
		return 0;
	else
		return vce_v3_0_start(adev);
}

const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.soft_reset = vce_v3_0_soft_reset,
	.print_status = vce_v3_0_print_status,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.emit_semaphore = amdgpu_vce_ring_emit_semaphore,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.is_lockup = amdgpu_ring_test_lockup,
	.insert_nop = amdgpu_ring_insert_nop,
};

static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs;
	adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};

static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
}