/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v2_0.h"
#include "jpeg_v4_0_5.h"
#include "mmsch_v4_0.h"

#include "vcn/vcn_4_0_5_offset.h"
#include "vcn/vcn_4_0_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"

#define regUVD_JPEG_PITCH_INTERNAL_OFFSET	0x401f

static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v4_0_5_set_powergating_state(void *handle,
				enum amd_powergating_state state);

static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * jpeg_v4_0_5_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int jpeg_v4_0_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->jpeg.num_jpeg_inst = 1;
	adev->jpeg.num_jpeg_rings = 1;

	jpeg_v4_0_5_set_dec_ring_funcs(adev);
	jpeg_v4_0_5_set_irq_funcs(adev);

	return 0;
}

/**
 * jpeg_v4_0_5_sw_init - sw init for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int jpeg_v4_0_5_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r;

	/* JPEG TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_4_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
	if (r)
		return r;

	/* JPEG DJPEG POISON EVENT */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->irq);
	if (r)
		return r;

	/* JPEG EJPEG POISON EVENT */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->irq);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	ring = adev->jpeg.inst->ring_dec;
	ring->use_doorbell = true;
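	/*
	 * Doorbell note (inferred from the index math below, not from any
	 * spec): VCN ring doorbells are handed out in pairs starting at
	 * vcn_ring0_1, hence the "<< 1". The JPEG ring then takes a fixed
	 * slot after them, and that slot differs under SR-IOV (+4 vs +1),
	 * presumably because the host reserves the leading entries.
	 */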
	ring->doorbell_index = amdgpu_sriov_vf(adev) ?
		(((adev->doorbell_index.vcn.vcn_ring0_1) << 1) + 4) :
		((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1);
	ring->vm_hub = AMDGPU_MMHUB0(0);

	sprintf(ring->name, "jpeg_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
	adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);

	return 0;
}

/**
 * jpeg_v4_0_5_sw_fini - sw fini for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * JPEG suspend and free up sw allocation
 */
static int jpeg_v4_0_5_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_suspend(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_fini(adev);

	return r;
}

/**
 * jpeg_v4_0_5_hw_init - start and test JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 */
static int jpeg_v4_0_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
	int r;

	r = amdgpu_ring_test_helper(ring);
	if (r)
		return r;

	DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n");

	return 0;
}

/**
 * jpeg_v4_0_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the JPEG block, mark ring as not ready any more
 */
static int jpeg_v4_0_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->vcn.idle_work);
	if (!amdgpu_sriov_vf(adev)) {
		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
		    RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
			jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
	}
	amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0);

	return 0;
}

/**
 * jpeg_v4_0_5_suspend - suspend JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend JPEG block
 */
static int jpeg_v4_0_5_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = jpeg_v4_0_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_suspend(adev);

	return r;
}

/**
 * jpeg_v4_0_5_resume - resume JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init JPEG block
 */
static int jpeg_v4_0_5_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	r = jpeg_v4_0_5_hw_init(adev);

	return r;
}
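/*
 * Clock-gating helpers: JPEG_CGC_CTRL selects static vs. dynamic (MGCG)
 * clock mode and programs the gate-delay/clock-off timers, while
 * JPEG_CGC_GATE turns gating of the individual sub-blocks (JPEG decoder,
 * JPEG2 decoder, JMCIF, JRBBM) on or off.
 */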
static void jpeg_v4_0_5_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data &= (~JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK);
	} else {
		/* clear the bit via the mask; ANDing with ~__SHIFT (zero) was a no-op */
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	}

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
}

static void jpeg_v4_0_5_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK;
	} else {
		/* clear the bit via the mask; ANDing with ~__SHIFT (zero) was a no-op */
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	}

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
}

static int jpeg_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev)
{
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG),
		       1 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS,
				   0, UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
	}

	/* disable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
		 ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	/* keep the JPEG in static PG mode */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
		 ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);

	return 0;
}

static int jpeg_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev)
{
	/* enable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS),
		 UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
		 ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG),
		       2 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS,
				   1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT,
				   UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
	}

	return 0;
}
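/*
 * Static power gating drives the ONO1 power domain through
 * UVD_IPX_DLDO_CONFIG (1 = power up, 2 = power down, going by the values
 * written above) and polls UVD_IPX_DLDO_STATUS until the domain reports the
 * requested state; the UVD_JPEG_POWER_STATUS writes toggle the anti-hang
 * mechanism around the transition.
 */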
/**
 * jpeg_v4_0_5_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v4_0_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
	int r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, true);

	/* doorbell programming is done for every playback */
	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
		(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);

	WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
		ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
		VCN_JPEG_DB_CTRL__EN_MASK);

	/* disable power gating */
	r = jpeg_v4_0_5_disable_static_power_gating(adev);
	if (r)
		return r;

	/* JPEG disable CGC */
	jpeg_v4_0_5_disable_clock_gating(adev);

	/* MJPEG global tiling registers */
	WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);

	/* enable JMI channel */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), 0,
		~UVD_JMI_CNTL__SOFT_RESET_MASK);

	/* enable System Interrupt for JRBC */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regJPEG_SYS_INT_EN),
		JPEG_SYS_INT_EN__DJRBC_MASK,
		~JPEG_SYS_INT_EN__DJRBC_MASK);

	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_VMID, 0);
	/* 0x1 | 0x2 appear to be RB_NO_FETCH | RB_RPTR_WR_EN (as spelled out
	 * in jpeg_v2_0): park the ring while it is being programmed.
	 */
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR, 0);
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, 0);
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, 0x00000002L);
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
	ring->wptr = RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);

	return 0;
}

/**
 * jpeg_v4_0_5_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the JPEG block
 */
static int jpeg_v4_0_5_stop(struct amdgpu_device *adev)
{
	int r;

	/* reset JMI */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL),
		 UVD_JMI_CNTL__SOFT_RESET_MASK,
		 ~UVD_JMI_CNTL__SOFT_RESET_MASK);

	jpeg_v4_0_5_enable_clock_gating(adev);

	/* enable power gating */
	r = jpeg_v4_0_5_enable_static_power_gating(adev);
	if (r)
		return r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, false);

	return 0;
}

/**
 * jpeg_v4_0_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t jpeg_v4_0_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR);
}

/**
 * jpeg_v4_0_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t jpeg_v4_0_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);
}

/**
 * jpeg_v4_0_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}
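/*
 * Idle detection keys off UVD_JRBC_STATUS.RB_JOB_DONE: the block counts as
 * idle once the ring buffer controller reports its last job complete.
 */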
static bool jpeg_v4_0_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 1;

	ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) &
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK));

	return ret;
}

static int jpeg_v4_0_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS,
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}

static int jpeg_v4_0_5_set_clockgating_state(void *handle,
					     enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = state == AMD_CG_STATE_GATE;

	if (enable) {
		if (!jpeg_v4_0_5_is_idle(handle))
			return -EBUSY;
		jpeg_v4_0_5_enable_clock_gating(adev);
	} else {
		jpeg_v4_0_5_disable_clock_gating(adev);
	}

	return 0;
}

static int jpeg_v4_0_5_set_powergating_state(void *handle,
					     enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (amdgpu_sriov_vf(adev)) {
		adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == adev->jpeg.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = jpeg_v4_0_5_stop(adev);
	else
		ret = jpeg_v4_0_5_start(adev);

	if (!ret)
		adev->jpeg.cur_state = state;

	return ret;
}

static int jpeg_v4_0_5_set_interrupt_state(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   unsigned type,
					   enum amdgpu_interrupt_state state)
{
	return 0;
}

static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: JPEG TRAP\n");

	switch (entry->src_id) {
	case VCN_4_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(adev->jpeg.inst->ring_dec);
		break;
	case VCN_4_0__SRCID_DJPEG0_POISON:
	case VCN_4_0__SRCID_EJPEG0_POISON:
		amdgpu_jpeg_process_poison_irq(adev, source, entry);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			      entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = {
	.name = "jpeg_v4_0_5",
	.early_init = jpeg_v4_0_5_early_init,
	.late_init = NULL,
	.sw_init = jpeg_v4_0_5_sw_init,
	.sw_fini = jpeg_v4_0_5_sw_fini,
	.hw_init = jpeg_v4_0_5_hw_init,
	.hw_fini = jpeg_v4_0_5_hw_fini,
	.suspend = jpeg_v4_0_5_suspend,
	.resume = jpeg_v4_0_5_resume,
	.is_idle = jpeg_v4_0_5_is_idle,
	.wait_for_idle = jpeg_v4_0_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = jpeg_v4_0_5_set_clockgating_state,
	.set_powergating_state = jpeg_v4_0_5_set_powergating_state,
};
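/*
 * The decode ring packet format is compatible with JPEG v2.0, so the
 * emit/insert callbacks below are reused from jpeg_v2_0 (plus the generic
 * amdgpu_jpeg test helpers); only the rptr/wptr handlers are version
 * specific.
 */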
static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.get_rptr = jpeg_v4_0_5_dec_ring_get_rptr,
	.get_wptr = jpeg_v4_0_5_dec_ring_get_wptr,
	.set_wptr = jpeg_v4_0_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v4_0_5_dec_ring_emit_vm_flush */
		18 + 18 + /* jpeg_v4_0_5_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v4_0_5_dec_ring_emit_ib */
	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v2_0_dec_ring_nop,
	.insert_start = jpeg_v2_0_dec_ring_insert_start,
	.insert_end = jpeg_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.inst->ring_dec->funcs = &jpeg_v4_0_5_dec_ring_vm_funcs;
	DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = {
	.set = jpeg_v4_0_5_set_interrupt_state,
	.process = jpeg_v4_0_5_process_interrupt,
};

static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.inst->irq.num_types = 1;
	adev->jpeg.inst->irq.funcs = &jpeg_v4_0_5_irq_funcs;
}

const struct amdgpu_ip_block_version jpeg_v4_0_5_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_JPEG,
	.major = 4,
	.minor = 0,
	.rev = 5,
	.funcs = &jpeg_v4_0_5_ip_funcs,
};