/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "vi.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);

/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}

static int uvd_v5_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_set_ring_funcs(adev);
	uvd_v5_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v5_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);

	return r;
}

static int uvd_v5_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	r = amdgpu_uvd_sw_fini(adev);
	if (r)
		return r;

	return r;
}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	/* raise clocks while booting up the VCPU */
	amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);

	r = uvd_v5_0_start(adev);
	if (r)
		goto done;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	/* lower clocks again */
	amdgpu_asic_set_uvd_clocks(adev, 0, 0);

	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v5_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	uvd_v5_0_stop(adev);
	ring->ready = false;

	return 0;
}

static int uvd_v5_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v5_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return r;
}

static int uvd_v5_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = uvd_v5_0_hw_init(adev);
	if (r)
		return r;

	return r;
}

/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v5_0_mc_resume(adev);

	/* disable clock gating */
	WREG32(mmUVD_CGC_GATE, 0);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			     (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

	rb_bufsz = order_base_2(ring->ring_size);
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
}

/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @fence: fence to emit
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v5_0_ring_test_ib - test ib execution
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully execute an IB
 */
static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct fence *fence = NULL;
	int r;

	r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
	if (r) {
		DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r);
		return r;
	}

	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
		goto error;
	}

	r = fence_wait(fence, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
(%d).\n", r); 587 goto error; 588 } 589 DRM_INFO("ib test on ring %d succeeded\n", ring->idx); 590 error: 591 fence_put(fence); 592 amdgpu_asic_set_uvd_clocks(adev, 0, 0); 593 return r; 594 } 595 596 static bool uvd_v5_0_is_idle(void *handle) 597 { 598 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 599 600 return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); 601 } 602 603 static int uvd_v5_0_wait_for_idle(void *handle) 604 { 605 unsigned i; 606 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 607 608 for (i = 0; i < adev->usec_timeout; i++) { 609 if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) 610 return 0; 611 } 612 return -ETIMEDOUT; 613 } 614 615 static int uvd_v5_0_soft_reset(void *handle) 616 { 617 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 618 619 uvd_v5_0_stop(adev); 620 621 WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK, 622 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); 623 mdelay(5); 624 625 return uvd_v5_0_start(adev); 626 } 627 628 static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev, 629 struct amdgpu_irq_src *source, 630 unsigned type, 631 enum amdgpu_interrupt_state state) 632 { 633 // TODO 634 return 0; 635 } 636 637 static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev, 638 struct amdgpu_irq_src *source, 639 struct amdgpu_iv_entry *entry) 640 { 641 DRM_DEBUG("IH: UVD TRAP\n"); 642 amdgpu_fence_process(&adev->uvd.ring); 643 return 0; 644 } 645 646 static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev) 647 { 648 uint32_t data, data1, data2, suvd_flags; 649 650 data = RREG32(mmUVD_CGC_CTRL); 651 data1 = RREG32(mmUVD_SUVD_CGC_GATE); 652 data2 = RREG32(mmUVD_SUVD_CGC_CTRL); 653 654 data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | 655 UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK); 656 657 suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK | 658 UVD_SUVD_CGC_GATE__SIT_MASK | 659 UVD_SUVD_CGC_GATE__SMP_MASK | 660 UVD_SUVD_CGC_GATE__SCM_MASK | 661 UVD_SUVD_CGC_GATE__SDB_MASK; 662 663 data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK | 664 (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) | 665 (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY)); 666 667 data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK | 668 UVD_CGC_CTRL__UDEC_CM_MODE_MASK | 669 UVD_CGC_CTRL__UDEC_IT_MODE_MASK | 670 UVD_CGC_CTRL__UDEC_DB_MODE_MASK | 671 UVD_CGC_CTRL__UDEC_MP_MODE_MASK | 672 UVD_CGC_CTRL__SYS_MODE_MASK | 673 UVD_CGC_CTRL__UDEC_MODE_MASK | 674 UVD_CGC_CTRL__MPEG2_MODE_MASK | 675 UVD_CGC_CTRL__REGS_MODE_MASK | 676 UVD_CGC_CTRL__RBC_MODE_MASK | 677 UVD_CGC_CTRL__LMI_MC_MODE_MASK | 678 UVD_CGC_CTRL__LMI_UMC_MODE_MASK | 679 UVD_CGC_CTRL__IDCT_MODE_MASK | 680 UVD_CGC_CTRL__MPRD_MODE_MASK | 681 UVD_CGC_CTRL__MPC_MODE_MASK | 682 UVD_CGC_CTRL__LBSI_MODE_MASK | 683 UVD_CGC_CTRL__LRBBM_MODE_MASK | 684 UVD_CGC_CTRL__WCB_MODE_MASK | 685 UVD_CGC_CTRL__VCPU_MODE_MASK | 686 UVD_CGC_CTRL__JPEG_MODE_MASK | 687 UVD_CGC_CTRL__SCPU_MODE_MASK); 688 data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK | 689 UVD_SUVD_CGC_CTRL__SIT_MODE_MASK | 690 UVD_SUVD_CGC_CTRL__SMP_MODE_MASK | 691 UVD_SUVD_CGC_CTRL__SCM_MODE_MASK | 692 UVD_SUVD_CGC_CTRL__SDB_MODE_MASK); 693 data1 |= suvd_flags; 694 695 WREG32(mmUVD_CGC_CTRL, data); 696 WREG32(mmUVD_CGC_GATE, 0); 697 WREG32(mmUVD_SUVD_CGC_GATE, data1); 698 WREG32(mmUVD_SUVD_CGC_CTRL, data2); 699 } 700 701 #if 0 702 static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev) 703 { 704 uint32_t data, data1, cgc_flags, suvd_flags; 705 706 data = RREG32(mmUVD_CGC_GATE); 707 data1 = 

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
	static int curstate = -1;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (curstate == state)
		return 0;

	curstate = state;
	if (enable) {
		/* disable HW gating and enable SW gating */
		uvd_v5_0_set_sw_clock_gating(adev);
	} else {
		/* wait for STATUS to clear */
		if (uvd_v5_0_wait_for_idle(handle))
			return -EBUSY;

		/* enable HW gates because UVD is idle */
		/* uvd_v5_0_set_hw_clock_gating(adev); */
	}

	return 0;
}

static int uvd_v5_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v5_0_stop(adev);
		return 0;
	} else {
		return uvd_v5_0_start(adev);
	}
}

const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
	.name = "uvd_v5_0",
	.early_init = uvd_v5_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v5_0_sw_init,
	.sw_fini = uvd_v5_0_sw_fini,
	.hw_init = uvd_v5_0_hw_init,
	.hw_fini = uvd_v5_0_hw_fini,
	.suspend = uvd_v5_0_suspend,
	.resume = uvd_v5_0_resume,
	.is_idle = uvd_v5_0_is_idle,
	.wait_for_idle = uvd_v5_0_wait_for_idle,
	.soft_reset = uvd_v5_0_soft_reset,
	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
	.set_powergating_state = uvd_v5_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
	.get_rptr = uvd_v5_0_ring_get_rptr,
	.get_wptr = uvd_v5_0_ring_get_wptr,
	.set_wptr = uvd_v5_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_ib = uvd_v5_0_ring_emit_ib,
	.emit_fence = uvd_v5_0_ring_emit_fence,
	.test_ring = uvd_v5_0_ring_test_ring,
	.test_ib = uvd_v5_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
	.set = uvd_v5_0_set_interrupt_state,
	.process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
}