1 /* 2 * Copyright 2023 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
 *
 */

#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v2_0.h"
#include "jpeg_v4_0_5.h"
#include "mmsch_v4_0.h"

#include "vcn/vcn_4_0_5_offset.h"
#include "vcn/vcn_4_0_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"

/* Internal index of the JPEG pitch register; the MMIO-visible counterpart
 * is resolved with SOC15_REG_OFFSET() in sw_init below. */
#define regUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f

static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v4_0_5_set_powergating_state(void *handle,
				enum amd_powergating_state state);

static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * jpeg_v4_0_5_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int jpeg_v4_0_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* JPEG 4.0.5 exposes a single instance with a single decode ring */
	adev->jpeg.num_jpeg_inst = 1;
	adev->jpeg.num_jpeg_rings = 1;

	jpeg_v4_0_5_set_dec_ring_funcs(adev);
	jpeg_v4_0_5_set_irq_funcs(adev);

	return 0;
}

/**
 * jpeg_v4_0_5_sw_init - sw init for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int jpeg_v4_0_5_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r;

	/* JPEG TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_4_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
	if (r)
		return r;

	/* JPEG DJPEG POISON EVENT */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->irq);
	if (r)
		return r;

	/* JPEG EJPEG POISON EVENT */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->irq);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	ring = adev->jpeg.inst->ring_dec;
	ring->use_doorbell = true;
	/* SR-IOV VFs use a different doorbell slot (+4) than bare metal (+1);
	 * both are derived from the VCN ring0/1 doorbell pair. */
	ring->doorbell_index = amdgpu_sriov_vf(adev) ?
		(((adev->doorbell_index.vcn.vcn_ring0_1) << 1) + 4) :
		((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1);
	ring->vm_hub = AMDGPU_MMHUB0(0);

	sprintf(ring->name, "jpeg_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	/* record both the internal and the MMIO-visible pitch register offset */
	adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
	adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);

	return 0;
}

/**
 * jpeg_v4_0_5_sw_fini - sw fini for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * JPEG suspend and free up sw allocation
 */
static int jpeg_v4_0_5_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_suspend(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_fini(adev);

	return r;
}

/**
 * jpeg_v4_0_5_hw_init - start and test JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Program the doorbell range/control registers and run the ring test.
 */
static int jpeg_v4_0_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
	int r;

	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
		(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);

	/* route the JPEG doorbell offset and enable it */
	WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
		ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
		VCN_JPEG_DB_CTRL__EN_MASK);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		return r;

	DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n");

	return 0;
}

/**
 * jpeg_v4_0_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the JPEG block, mark ring as not ready any more
 */
static int jpeg_v4_0_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* NOTE(review): this cancels the VCN idle worker, yet the ring's
	 * begin_use/end_use hooks used by this file are the JPEG helpers -
	 * confirm &adev->vcn.idle_work (vs the JPEG worker) is intended. */
	cancel_delayed_work_sync(&adev->vcn.idle_work);
	if (!amdgpu_sriov_vf(adev)) {
		/* gate the block if it is ungated and the JRBC is not idle */
		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
		    RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
			jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
	}
	amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0);

	return 0;
}

/**
 * jpeg_v4_0_5_suspend - suspend JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend JPEG block
 */
static int jpeg_v4_0_5_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = jpeg_v4_0_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_suspend(adev);

	return r;
}

/**
 * jpeg_v4_0_5_resume - resume JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init JPEG block
 */
static int jpeg_v4_0_5_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	r = jpeg_v4_0_5_hw_init(adev);

	return r;
}

/* Turn coarse clock gating off (used before starting the engine). */
static void jpeg_v4_0_5_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data &= (~JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK);
	} else {
		/* NOTE(review): clearing with the __SHIFT constant looks like
		 * it was meant to be the _MASK; as written this may clear
		 * nothing - verify against the sh_mask header. */
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	}

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);

	/* ungate the decoder, JMCIF and JRBBM sub-blocks */
	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
}

/* Turn coarse clock gating back on (used when stopping the engine). */
static void jpeg_v4_0_5_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK;
	} else {
		/* NOTE(review): same __SHIFT-vs-_MASK concern as in
		 * jpeg_v4_0_5_disable_clock_gating(). */
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	}

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
		|JPEG_CGC_GATE__JPEG2_DEC_MASK
		|JPEG_CGC_GATE__JMCIF_MASK
		|JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
}

/* Power the block up and take it out of static power gating. */
static int jpeg_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev)
{
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		/* request an ONO1 power transition (presumably power-up;
		 * mirrored by the enable path below) and wait for the
		 * status bit to read back 0 */
		WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG),
			1 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS,
			0, UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
	}

	/* disable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	/* keep the JPEG in static PG mode */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
		~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);

	return 0;
}

/* Re-enter static power gating (engine must already be quiesced). */
static int jpeg_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev)
{
	/* enable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS),
		UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		/* request the opposite ONO1 transition from the disable path
		 * and wait for the status bit to read back 1 */
		WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG),
			2 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
	}

	return 0;
}

/**
 * jpeg_v4_0_5_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v4_0_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
	int r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, true);

	/* disable power gating */
	r = jpeg_v4_0_5_disable_static_power_gating(adev);
	if (r)
		return r;

	/* JPEG disable CGC */
	jpeg_v4_0_5_disable_clock_gating(adev);

	/* MJPEG global tiling registers */
	WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);

	/* enable JMI channel */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), 0,
		~UVD_JMI_CNTL__SOFT_RESET_MASK);

	/* enable System Interrupt for JRBC */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regJPEG_SYS_INT_EN),
		JPEG_SYS_INT_EN__DJRBC_MASK,
		~JPEG_SYS_INT_EN__DJRBC_MASK);

	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_VMID, 0);
	/* NOTE(review): magic 0x1 | 0x2 presumably are the JRBC RB_CNTL
	 * no-fetch / rptr-write-enable bits (cleared to 0x2 below once the
	 * ring base/pointers are programmed) - confirm against sh_mask. */
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR, 0);
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, 0);
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, 0x00000002L);
	/* ring size register is in dwords */
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
	ring->wptr = RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);

	return 0;
}

/**
 * jpeg_v4_0_5_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the JPEG block
 */
static int jpeg_v4_0_5_stop(struct amdgpu_device *adev)
{
	int r;

	/* reset JMI */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL),
		UVD_JMI_CNTL__SOFT_RESET_MASK,
		~UVD_JMI_CNTL__SOFT_RESET_MASK);

	jpeg_v4_0_5_enable_clock_gating(adev);

	/* enable power gating */
	r = jpeg_v4_0_5_enable_static_power_gating(adev);
	if (r)
		return r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, false);

	return 0;
}

/**
 * jpeg_v4_0_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t jpeg_v4_0_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR);
}

/**
 * jpeg_v4_0_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t jpeg_v4_0_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* with a doorbell the CPU copy is authoritative; otherwise read MMIO */
	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);
}

/**
 * jpeg_v4_0_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* publish wptr to the CPU copy, then ring the doorbell */
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(JPEG,
			0, regUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

/* Idle when the JRBC reports the current ring job as done. */
static bool jpeg_v4_0_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 1;

	ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) &
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK));

	return ret;
}

/* Poll JRBC status until RB_JOB_DONE is set (or the wait helper times out). */
static int jpeg_v4_0_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS,
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}

static int jpeg_v4_0_5_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;

	if (enable) {
		/* refuse to gate clocks while the engine is still busy */
		if (!jpeg_v4_0_5_is_idle(handle))
			return -EBUSY;
		jpeg_v4_0_5_enable_clock_gating(adev);
	} else {
		jpeg_v4_0_5_disable_clock_gating(adev);
	}

	return 0;
}

static int jpeg_v4_0_5_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	/* under SR-IOV the host controls power gating; just record the state */
	if (amdgpu_sriov_vf(adev)) {
		adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == adev->jpeg.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = jpeg_v4_0_5_stop(adev);
	else
		ret = jpeg_v4_0_5_start(adev);

	/* only cache the new state if the transition succeeded */
	if (!ret)
		adev->jpeg.cur_state = state;

	return ret;
}

/* No per-source interrupt enable/disable is programmed here. */
static int jpeg_v4_0_5_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

/* Dispatch decode-complete fences and poison events from the IH ring. */
static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: JPEG TRAP\n");

	switch (entry->src_id) {
	case VCN_4_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(adev->jpeg.inst->ring_dec);
		break;
	case VCN_4_0__SRCID_DJPEG0_POISON:
	case VCN_4_0__SRCID_EJPEG0_POISON:
		amdgpu_jpeg_process_poison_irq(adev, source, entry);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = {
	.name = "jpeg_v4_0_5",
	.early_init = jpeg_v4_0_5_early_init,
	.late_init = NULL,
	.sw_init = jpeg_v4_0_5_sw_init,
	.sw_fini = jpeg_v4_0_5_sw_fini,
	.hw_init = jpeg_v4_0_5_hw_init,
	.hw_fini = jpeg_v4_0_5_hw_fini,
	.suspend = jpeg_v4_0_5_suspend,
	.resume = jpeg_v4_0_5_resume,
	.is_idle = jpeg_v4_0_5_is_idle,
	.wait_for_idle = jpeg_v4_0_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = jpeg_v4_0_5_set_clockgating_state,
	.set_powergating_state = jpeg_v4_0_5_set_powergating_state,
};

/* Ring vtable; emit helpers are shared with the JPEG 2.0 implementation. */
static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.get_rptr = jpeg_v4_0_5_dec_ring_get_rptr,
	.get_wptr = jpeg_v4_0_5_dec_ring_get_wptr,
	.set_wptr = jpeg_v4_0_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v4_0_5_dec_ring_emit_vm_flush */
		18 + 18 + /* jpeg_v4_0_5_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v4_0_5_dec_ring_emit_ib */
	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v2_0_dec_ring_nop,
	.insert_start = jpeg_v2_0_dec_ring_insert_start,
	.insert_end = jpeg_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.inst->ring_dec->funcs = &jpeg_v4_0_5_dec_ring_vm_funcs;
	DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = {
	.set = jpeg_v4_0_5_set_interrupt_state,
	.process = jpeg_v4_0_5_process_interrupt,
};

static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.inst->irq.num_types = 1;
	adev->jpeg.inst->irq.funcs = &jpeg_v4_0_5_irq_funcs;
}

const struct amdgpu_ip_block_version jpeg_v4_0_5_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_JPEG,
	.major = 4,
	.minor = 0,
	.rev = 5,
	.funcs = &jpeg_v4_0_5_ip_funcs,
};