/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}

static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v6_0_set_ring_funcs(adev);
	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

	return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	r = amdgpu_uvd_sw_fini(adev);
	if (r)
		return r;

	return r;
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	r = uvd_v6_0_start(adev);
	if (r)
		goto done;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	uvd_v6_0_stop(adev);
	ring->ready = false;

	return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	/* Skip this for APU for now */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_uvd_suspend(adev);
		if (r)
			return r;
	}

	return r;
}

static int uvd_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Skip this for APU for now */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_uvd_resume(adev);
		if (r)
			return r;
	}
	r = uvd_v6_0_hw_init(adev);
	if (r)
		return r;

	return r;
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.gpu_addr));

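	/* The VCPU cache is split into three windows, programmed in order
	 * below: window 0 holds the firmware image, window 1 the heap, and
	 * window 2 the stack plus the per-session context areas.
	 */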
	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}

#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
		bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__VCPU_MASK |
			UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
			UVD_SUVD_CGC_GATE__SIT_MASK |
			UVD_SUVD_CGC_GATE__SMP_MASK |
			UVD_SUVD_CGC_GATE__SCM_MASK |
			UVD_SUVD_CGC_GATE__SDB_MASK |
			UVD_SUVD_CGC_GATE__SRE_H264_MASK |
			UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SIT_H264_MASK |
			UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SCM_H264_MASK |
			UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SDB_H264_MASK |
			UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__LMI_UMC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__VCPU_MASK |
			UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
			UVD_SUVD_CGC_GATE__SIT_MASK |
			UVD_SUVD_CGC_GATE__SMP_MASK |
			UVD_SUVD_CGC_GATE__SCM_MASK |
			UVD_SUVD_CGC_GATE__SDB_MASK |
			UVD_SUVD_CGC_GATE__SRE_H264_MASK |
			UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SIT_H264_MASK |
			UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SCM_H264_MASK |
			UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SDB_H264_MASK |
			UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* disable clock gating */
	WREG32_FIELD(UVD_CGC_CTRL, DYN_CLOCK_MODE, 0);

	/* disable interrupt */
	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN,
		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

	return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence sequence number is written to
 * @seq: sequence number to write
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp flush.
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp invalidate.
 */
static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
	amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
	amdgpu_ring_write(ring, vm_id);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	uint32_t reg;

	if (vm_id < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, pd_addr >> 12);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 1 << vm_id);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 1 << vm_id); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);
}

static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xE);
}

static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v6_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
static bool uvd_v6_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v6_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.srbm_soft_reset)
		return 0;

	uvd_v6_0_stop(adev);
	return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO */
	return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.ring);
	return 0;
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, data2, suvd_flags;

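	/* Start from the current gating register values so that only the
	 * gating delay/timer and per-block mode fields are changed below.
	 */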
	data = RREG32(mmUVD_CGC_CTRL);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG2_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_CGC_GATE, 0);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK |
		    UVD_CGC_GATE__JPEG_MASK |
		    UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v6_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
	else
		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;

	uvd_v6_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (enable) {
		/* disable HW gating and enable SW gating */
		uvd_v6_0_set_sw_clock_gating(adev);
	} else {
		/* wait for STATUS to clear */
		if (uvd_v6_0_wait_for_idle(handle))
			return -EBUSY;

		/* enable HW gates because UVD is idle */
		/* uvd_v6_0_set_hw_clock_gating(adev); */
	}

	return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v6_0_stop(adev);
		return 0;
	} else {
		return uvd_v6_0_start(adev);
	}
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.name = "uvd_v6_0",
	.early_init = uvd_v6_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.check_soft_reset = uvd_v6_0_check_soft_reset,
	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
	.soft_reset = uvd_v6_0_soft_reset,
	.post_soft_reset = uvd_v6_0_post_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		2 + /* uvd_v6_0_ring_emit_hdp_flush */
		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.emit_frame_size =
		2 + /* uvd_v6_0_ring_emit_hdp_flush */
		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		20 + /* uvd_v6_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_POLARIS10) {
		adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
		DRM_INFO("UVD is enabled in VM mode\n");
	} else {
		adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
		DRM_INFO("UVD is enabled in physical mode\n");
	}
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 3,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};