/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Sonny Jiang <sonny.jiang@amd.com>
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "sid.h"

#include "uvd/uvd_3_1_d.h"
#include "uvd/uvd_3_1_sh_mask.h"

#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"

/**
 * uvd_v3_1_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v3_1_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v3_1_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v3_1_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v3_1_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v3_1_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v3_1_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job associated with the indirect buffer
 * @ib: indirect buffer to execute
 * @flags: flags associated with the indirect buffer
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v3_1_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
	amdgpu_ring_write(ring, ib->gpu_addr);
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v3_1_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v3_1_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v3_1_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v3_1_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

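/**
 * uvd_v3_1_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring pointer
 * @count: number of padding dwords to insert
 *
 * Pad the ring with writes to the UVD_NO_OP register. Each NOP uses two
 * dwords (packet header plus payload), so the write pointer and @count
 * are expected to be even.
 */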
static void uvd_v3_1_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

static const struct amdgpu_ring_funcs uvd_v3_1_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v3_1_ring_get_rptr,
	.get_wptr = uvd_v3_1_ring_get_wptr,
	.set_wptr = uvd_v3_1_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		14, /* uvd_v3_1_ring_emit_fence x1 no user fence */
	.emit_ib_size = 4, /* uvd_v3_1_ring_emit_ib */
	.emit_ib = uvd_v3_1_ring_emit_ib,
	.emit_fence = uvd_v3_1_ring_emit_fence,
	.test_ring = uvd_v3_1_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v3_1_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v3_1_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->ring.funcs = &uvd_v3_1_ring_funcs;
}

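/**
 * uvd_v3_1_set_dcm - configure UVD dynamic clock mode
 *
 * @adev: amdgpu_device pointer
 * @sw_mode: use software controlled clock ramping
 *
 * Program UVD_CGC_CTRL and UVD_CGC_CTRL2 for dynamic clocking. With
 * @sw_mode the dynamic OCLK/RCLK ramping is enabled through
 * UVD_CGC_CTRL2, otherwise UVD_CGC_CTRL2 is cleared and the additional
 * gating bits in UVD_CGC_CTRL are set instead.
 */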
static void uvd_v3_1_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode)
{
	u32 tmp, tmp2;

	WREG32_FIELD(UVD_CGC_GATE, REGS, 0);

	tmp = RREG32(mmUVD_CGC_CTRL);
	tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
	tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
		(4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);

	if (sw_mode) {
		tmp &= ~0x7ffff800;
		tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
			UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
			(7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
	} else {
		tmp |= 0x7ffff800;
		tmp2 = 0;
	}

	WREG32(mmUVD_CGC_CTRL, tmp);
	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}

/**
 * uvd_v3_1_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v3_1_mc_resume(struct amdgpu_device *adev)
{
	uint64_t addr;
	uint32_t size;

	/* program the VCPU memory controller bits 0-27 */
	addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = AMDGPU_UVD_HEAP_SIZE >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = (AMDGPU_UVD_STACK_SIZE +
		(AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	/* bits 28-31 */
	addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
	WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

	/* bits 32-39 */
	addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

/**
 * uvd_v3_1_fw_validate - FW validation operation
 *
 * @adev: amdgpu_device pointer
 *
 * Initiate the UVD firmware validation and check the result.
 */
static int uvd_v3_1_fw_validate(struct amdgpu_device *adev)
{
	int i;
	uint32_t keysel = adev->uvd.keyselect;

	WREG32(mmUVD_FW_START, keysel);

	for (i = 0; i < 10; ++i) {
		mdelay(10);
		if (RREG32(mmUVD_FW_STATUS) & UVD_FW_STATUS__DONE_MASK)
			break;
	}

	if (i == 10)
		return -ETIMEDOUT;

	if (!(RREG32(mmUVD_FW_STATUS) & UVD_FW_STATUS__PASS_MASK))
		return -EINVAL;

	for (i = 0; i < 10; ++i) {
		mdelay(10);
		if (!(RREG32(mmUVD_FW_STATUS) & UVD_FW_STATUS__BUSY_MASK))
			break;
	}

	if (i == 10)
		return -ETIMEDOUT;

	return 0;
}

/**
 * uvd_v3_1_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v3_1_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz;
	int i, j, r;
	u32 tmp;
	/* disable byte swapping */
	u32 lmi_swap_cntl = 0;
	u32 mp_swap_cntl = 0;

	/* set uvd busy */
	WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2));

	uvd_v3_1_set_dcm(adev, true);
	WREG32(mmUVD_CGC_GATE, 0);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
		(1 << 21) | (1 << 9) | (1 << 20));

	tmp = RREG32(mmUVD_MPC_CNTL);
	WREG32(mmUVD_MPC_CNTL, tmp | 0x10);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
	WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));

	WREG32_P(mmUVD_STATUS, 0, ~(1<<2));

	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* program the 4GB memory segment for rptr and ring buffer */
	WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
		(0x7 << 16) | (0x1 << 31));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0x0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	/* set the ring address */
	WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);

	/* Set ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size);
	rb_bufsz = (0x1 << 8) | rb_bufsz;
	WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

	return 0;
}

/**
 * uvd_v3_1_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v3_1_stop(struct amdgpu_device *adev)
{
	uint32_t i, j;
	uint32_t status;

	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(1);
		}
		if (status & 2)
			break;
	}

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0xf)
				break;
			mdelay(1);
		}
		if (status & 0xf)
			break;
	}

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0x240)
				break;
			mdelay(1);
		}
		if (status & 0x240)
			break;
	}

	WREG32_P(0x3D49, 0, ~(1 << 2));

	WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32(mmUVD_STATUS, 0);

	uvd_v3_1_set_dcm(adev, false);
}

static int uvd_v3_1_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

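/**
 * uvd_v3_1_process_interrupt - process a UVD interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source this entry belongs to
 * @entry: interrupt vector entry
 *
 * Handle a UVD trap interrupt by processing the fences on the UVD ring.
 */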
static int uvd_v3_1_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.inst->ring);
	return 0;
}

static const struct amdgpu_irq_src_funcs uvd_v3_1_irq_funcs = {
	.set = uvd_v3_1_set_interrupt_state,
	.process = uvd_v3_1_process_interrupt,
};

static void uvd_v3_1_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->irq.num_types = 1;
	adev->uvd.inst->irq.funcs = &uvd_v3_1_irq_funcs;
}

static int uvd_v3_1_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->uvd.num_uvd_inst = 1;

	uvd_v3_1_set_ring_funcs(adev);
	uvd_v3_1_set_irq_funcs(adev);

	return 0;
}

static int uvd_v3_1_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;
	void *ptr;
	uint32_t ucode_len;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	/* Retrieve the firmware validation key */
	ptr = adev->uvd.inst[0].cpu_addr;
	ptr += 192 + 16;
	memcpy(&ucode_len, ptr, 4);
	ptr += ucode_len;
	memcpy(&adev->uvd.keyselect, ptr, 4);

	return r;
}

static int uvd_v3_1_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

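/**
 * uvd_v3_1_enable_mgcg - enable/disable medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable clock gating
 *
 * Toggle the UVD memory and dynamic clock gating bits, taking the
 * AMD_CG_SUPPORT_UVD_MGCG flag into account when enabling.
 */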
static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0x3fff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0x3fff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

/**
 * uvd_v3_1_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v3_1_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int r;

	uvd_v3_1_mc_resume(adev);

	r = uvd_v3_1_fw_validate(adev);
	if (r) {
		DRM_ERROR("amdgpu: UVD Firmware validate fail (%d).\n", r);
		return r;
	}

	uvd_v3_1_enable_mgcg(adev, true);
	amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);

	uvd_v3_1_start(adev);

	r = amdgpu_ring_test_helper(ring);
	if (r) {
		DRM_ERROR("amdgpu: UVD ring test fail (%d).\n", r);
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v3_1_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v3_1_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v3_1_stop(adev);

	return 0;
}

static int uvd_v3_1_prepare_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return amdgpu_uvd_prepare_suspend(adev);
}

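/**
 * uvd_v3_1_suspend - suspend the UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Gate UVD clocks and power (or disable UVD dpm), stop the hardware
 * and save the UVD state.
 */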
static int uvd_v3_1_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 * - cancel the delayed idle work
	 * - enable powergating
	 * - enable clockgating
	 * - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		/* shutdown the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
	}

	r = uvd_v3_1_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v3_1_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v3_1_hw_init(adev);
}

static bool uvd_v3_1_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v3_1_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v3_1_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v3_1_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v3_1_start(adev);
}

static int uvd_v3_1_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int uvd_v3_1_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs uvd_v3_1_ip_funcs = {
	.name = "uvd_v3_1",
	.early_init = uvd_v3_1_early_init,
	.late_init = NULL,
	.sw_init = uvd_v3_1_sw_init,
	.sw_fini = uvd_v3_1_sw_fini,
	.hw_init = uvd_v3_1_hw_init,
	.hw_fini = uvd_v3_1_hw_fini,
	.prepare_suspend = uvd_v3_1_prepare_suspend,
	.suspend = uvd_v3_1_suspend,
	.resume = uvd_v3_1_resume,
	.is_idle = uvd_v3_1_is_idle,
	.wait_for_idle = uvd_v3_1_wait_for_idle,
	.soft_reset = uvd_v3_1_soft_reset,
	.set_clockgating_state = uvd_v3_1_set_clockgating_state,
	.set_powergating_state = uvd_v3_1_set_powergating_state,
};

const struct amdgpu_ip_block_version uvd_v3_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &uvd_v3_1_ip_funcs,
};