/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v2_0.h"
#include "jpeg_v4_0_5.h"
#include "mmsch_v4_0.h"

#include "vcn/vcn_4_0_5_offset.h"
#include "vcn/vcn_4_0_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"

#define regUVD_JPEG_PITCH_INTERNAL_OFFSET	0x401f

static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v4_0_5_set_powergating_state(void *handle,
				enum amd_powergating_state state);

static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * jpeg_v4_0_5_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int jpeg_v4_0_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->jpeg.num_jpeg_inst = 1;
	adev->jpeg.num_jpeg_rings = 1;

	jpeg_v4_0_5_set_dec_ring_funcs(adev);
	jpeg_v4_0_5_set_irq_funcs(adev);

	return 0;
}

/**
 * jpeg_v4_0_5_sw_init - sw init for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int jpeg_v4_0_5_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r;

	/* JPEG TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_4_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
	if (r)
		return r;

	/* JPEG DJPEG POISON EVENT */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->irq);
	if (r)
		return r;

	/* JPEG EJPEG POISON EVENT */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->irq);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	ring = adev->jpeg.inst->ring_dec;
	ring->use_doorbell = true;
	/* SR-IOV VFs use a different doorbell slot than bare metal */
	ring->doorbell_index = amdgpu_sriov_vf(adev) ?
		(((adev->doorbell_index.vcn.vcn_ring0_1) << 1) + 4) :
		((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1);
	ring->vm_hub = AMDGPU_MMHUB0(0);

	sprintf(ring->name, "jpeg_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
	adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);

	return 0;
}
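/*
 * Note on the pitch registers set up at the end of sw_init above: the
 * internal offset is the value encoded into ring packets, while the
 * external SOC15 offset is what the common amdgpu_jpeg ring test
 * helpers read back through MMIO to verify that a write submitted via
 * the ring actually landed.
 */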
/**
 * jpeg_v4_0_5_sw_fini - sw fini for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * JPEG suspend and free up sw allocation
 */
static int jpeg_v4_0_5_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_suspend(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_fini(adev);

	return r;
}

/**
 * jpeg_v4_0_5_hw_init - start and test JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Test the decode ring to make sure the JPEG block is up
 */
static int jpeg_v4_0_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
	int r;

	r = amdgpu_ring_test_helper(ring);
	if (r)
		return r;

	DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n");

	return 0;
}

/**
 * jpeg_v4_0_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the JPEG block, mark ring as not ready any more
 */
static int jpeg_v4_0_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->vcn.idle_work);
	if (!amdgpu_sriov_vf(adev)) {
		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
		    RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
			jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
	}

	return 0;
}

/**
 * jpeg_v4_0_5_suspend - suspend JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend JPEG block
 */
static int jpeg_v4_0_5_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = jpeg_v4_0_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_suspend(adev);

	return r;
}

/**
 * jpeg_v4_0_5_resume - resume JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init JPEG block
 */
static int jpeg_v4_0_5_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	r = jpeg_v4_0_5_hw_init(adev);

	return r;
}

static void jpeg_v4_0_5_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data &= (~JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK);
	} else {
		/* clear the field, not its shift: ~SHIFT would be a no-op */
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	}

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
}
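/*
 * jpeg_v4_0_5_enable_clock_gating() below mirrors the function above:
 * both program the same CGC_CTRL gate-delay and off-delay fields, but
 * the enable path sets JPEG_DEC_MODE and turns the CGC_GATE bits back
 * on, while the disable path clears them.
 */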
static void jpeg_v4_0_5_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK;
	} else {
		/* clear the field, not its shift: ~SHIFT would be a no-op */
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	}

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
}

static int jpeg_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev)
{
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG),
			1 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS,
			0, UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
	}

	/* disable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	/* keep the JPEG in static PG mode */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
		~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);

	return 0;
}

static int jpeg_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev)
{
	/* enable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS),
		UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG),
			2 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
	}

	return 0;
}

/**
 * jpeg_v4_0_5_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v4_0_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
	int r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, true);

	/* doorbell programming is done for every playback */
	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
		(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);

	WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
		ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
		VCN_JPEG_DB_CTRL__EN_MASK);

	/* disable power gating */
	r = jpeg_v4_0_5_disable_static_power_gating(adev);
	if (r)
		return r;

	/* JPEG disable CGC */
	jpeg_v4_0_5_disable_clock_gating(adev);

	/* MJPEG global tiling registers */
	WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);

	/* enable JMI channel */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), 0,
		~UVD_JMI_CNTL__SOFT_RESET_MASK);

	/* enable System Interrupt for JRBC */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regJPEG_SYS_INT_EN),
		JPEG_SYS_INT_EN__DJRBC_MASK,
		~JPEG_SYS_INT_EN__DJRBC_MASK);

	/* program the JRBC: hold ring fetch (bit 0) and enable driver
	 * rptr writes (bit 1) while the ring address and pointers are
	 * set up, then release the no-fetch bit so the engine runs */
	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_VMID, 0);
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR, 0);
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, 0);
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, 0x00000002L);
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
	ring->wptr = RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);

	return 0;
}
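/*
 * jpeg_v4_0_5_stop() below is the inverse of the start sequence above:
 * JMI goes back into soft reset, clock gating is re-enabled, static
 * power gating is re-engaged, and finally DPM is asked to power the
 * JPEG block down.
 */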
/**
 * jpeg_v4_0_5_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the JPEG block
 */
static int jpeg_v4_0_5_stop(struct amdgpu_device *adev)
{
	int r;

	/* reset JMI */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL),
		UVD_JMI_CNTL__SOFT_RESET_MASK,
		~UVD_JMI_CNTL__SOFT_RESET_MASK);

	jpeg_v4_0_5_enable_clock_gating(adev);

	/* enable power gating */
	r = jpeg_v4_0_5_enable_static_power_gating(adev);
	if (r)
		return r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, false);

	return 0;
}

/**
 * jpeg_v4_0_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t jpeg_v4_0_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR);
}

/**
 * jpeg_v4_0_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t jpeg_v4_0_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);
}

/**
 * jpeg_v4_0_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static bool jpeg_v4_0_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 1;

	ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) &
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK));

	return ret;
}

static int jpeg_v4_0_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS,
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}
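/*
 * Both idle checks above key off the RB_JOB_DONE bit in
 * UVD_JRBC_STATUS: the engine is treated as idle once the last job
 * fetched from the ring buffer has completed.
 */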
static int jpeg_v4_0_5_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		if (!jpeg_v4_0_5_is_idle(handle))
			return -EBUSY;
		jpeg_v4_0_5_enable_clock_gating(adev);
	} else {
		jpeg_v4_0_5_disable_clock_gating(adev);
	}

	return 0;
}

static int jpeg_v4_0_5_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	/* under SR-IOV the host owns power gating; always report ungated */
	if (amdgpu_sriov_vf(adev)) {
		adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == adev->jpeg.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = jpeg_v4_0_5_stop(adev);
	else
		ret = jpeg_v4_0_5_start(adev);

	if (!ret)
		adev->jpeg.cur_state = state;

	return ret;
}

static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: JPEG TRAP\n");

	switch (entry->src_id) {
	case VCN_4_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(adev->jpeg.inst->ring_dec);
		break;
	case VCN_4_0__SRCID_DJPEG0_POISON:
	case VCN_4_0__SRCID_EJPEG0_POISON:
		amdgpu_jpeg_process_poison_irq(adev, source, entry);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = {
	.name = "jpeg_v4_0_5",
	.early_init = jpeg_v4_0_5_early_init,
	.late_init = NULL,
	.sw_init = jpeg_v4_0_5_sw_init,
	.sw_fini = jpeg_v4_0_5_sw_fini,
	.hw_init = jpeg_v4_0_5_hw_init,
	.hw_fini = jpeg_v4_0_5_hw_fini,
	.suspend = jpeg_v4_0_5_suspend,
	.resume = jpeg_v4_0_5_resume,
	.is_idle = jpeg_v4_0_5_is_idle,
	.wait_for_idle = jpeg_v4_0_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = jpeg_v4_0_5_set_clockgating_state,
	.set_powergating_state = jpeg_v4_0_5_set_powergating_state,
};

static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.get_rptr = jpeg_v4_0_5_dec_ring_get_rptr,
	.get_wptr = jpeg_v4_0_5_dec_ring_get_wptr,
	.set_wptr = jpeg_v4_0_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v4_0_5_dec_ring_emit_vm_flush */
		18 + 18 + /* jpeg_v4_0_5_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v4_0_5_dec_ring_emit_ib */
	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v2_0_dec_ring_nop,
	.insert_start = jpeg_v2_0_dec_ring_insert_start,
	.insert_end = jpeg_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.inst->ring_dec->funcs = &jpeg_v4_0_5_dec_ring_vm_funcs;
	DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n");
}
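/*
 * Only a single interrupt type is registered below: decode completion
 * and the DJPEG/EJPEG poison events all arrive through the same source
 * and are demultiplexed by src_id in jpeg_v4_0_5_process_interrupt().
 */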
static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = {
	.process = jpeg_v4_0_5_process_interrupt,
};

static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.inst->irq.num_types = 1;
	adev->jpeg.inst->irq.funcs = &jpeg_v4_0_5_irq_funcs;
}

const struct amdgpu_ip_block_version jpeg_v4_0_5_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_JPEG,
	.major = 4,
	.minor = 0,
	.rev = 5,
	.funcs = &jpeg_v4_0_5_ip_funcs,
};