/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v2_0.h"
#include "jpeg_v4_0.h"
#include "mmsch_v4_0.h"

#include "vcn/vcn_4_0_0_offset.h"
#include "vcn/vcn_4_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"

#define regUVD_JPEG_PITCH_INTERNAL_OFFSET	0x401f

static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev);
static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v4_0_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev);

static void jpeg_v4_0_dec_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * jpeg_v4_0_early_init - set function pointers
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 */
static int jpeg_v4_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->jpeg.num_jpeg_inst = 1;
	adev->jpeg.num_jpeg_rings = 1;

	jpeg_v4_0_set_dec_ring_funcs(adev);
	jpeg_v4_0_set_irq_funcs(adev);
	jpeg_v4_0_set_ras_funcs(adev);

	return 0;
}
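
/*
 * Illustrative sketch (not part of this driver): the amd_ip_funcs
 * callbacks defined in this file are invoked by the amdgpu IP-block
 * framework in a fixed order during device bring-up, roughly
 *
 *	early_init -> sw_init -> hw_init
 *
 * and on teardown
 *
 *	hw_fini -> sw_fini
 *
 * A caller iterating the registered blocks looks approximately like:
 *
 *	for (i = 0; i < adev->num_ip_blocks; i++) {
 *		r = adev->ip_blocks[i].version->funcs->early_init(&adev->ip_blocks[i]);
 *		if (r)
 *			return r;
 *	}
 */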

/**
 * jpeg_v4_0_sw_init - sw init for JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int jpeg_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int r;

	/* JPEG TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_4_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
	if (r)
		return r;

	/* JPEG DJPEG POISON EVENT */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
	if (r)
		return r;

	/* JPEG EJPEG POISON EVENT */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	ring = adev->jpeg.inst->ring_dec;
	ring->use_doorbell = true;
	ring->doorbell_index = amdgpu_sriov_vf(adev) ?
		(((adev->doorbell_index.vcn.vcn_ring0_1) << 1) + 4) :
		((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1);
	ring->vm_hub = AMDGPU_MMHUB0(0);

	sprintf(ring->name, "jpeg_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
	adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);

	r = amdgpu_jpeg_ras_sw_init(adev);
	if (r)
		return r;

	/* TODO: Add queue reset mask when FW fully supports it */
	adev->jpeg.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
	r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * jpeg_v4_0_sw_fini - sw fini for JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * JPEG suspend and free up sw allocation
 */
static int jpeg_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_jpeg_suspend(adev);
	if (r)
		return r;

	amdgpu_jpeg_sysfs_reset_mask_fini(adev);
	r = amdgpu_jpeg_sw_fini(adev);

	return r;
}

/**
 * jpeg_v4_0_hw_init - start and test JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 */
static int jpeg_v4_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
	int r;

	if (amdgpu_sriov_vf(adev)) {
		r = jpeg_v4_0_start_sriov(adev);
		if (r)
			return r;
		ring->wptr = 0;
		ring->wptr_old = 0;
		jpeg_v4_0_dec_ring_set_wptr(ring);
		ring->sched.ready = true;
	} else {
		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
				(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);

		WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
			ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
			VCN_JPEG_DB_CTRL__EN_MASK);

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	return 0;
}
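
/*
 * Note on the doorbell selection in jpeg_v4_0_sw_init() above (a sketch
 * with a made-up vcn_ring0_1 base of 8; the << 1 appears to convert the
 * VCN doorbell index to the 32-bit slot numbering the ring code uses,
 * matching how the other VCN rings compute theirs):
 *
 *	bare metal: (8 << 1) + 1 = 17
 *	SR-IOV VF:  (8 << 1) + 4 = 20
 *
 * The VF variant presumably skips slots that the host-side setup
 * reserves for the VCN rings.
 */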

/**
 * jpeg_v4_0_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the JPEG block, mark ring as not ready any more
 */
static int jpeg_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	cancel_delayed_work_sync(&adev->jpeg.idle_work);
	if (!amdgpu_sriov_vf(adev)) {
		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
		    RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
			jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
	}
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
		amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);

	return 0;
}

/**
 * jpeg_v4_0_suspend - suspend JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend JPEG block
 */
static int jpeg_v4_0_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = jpeg_v4_0_hw_fini(ip_block);
	if (r)
		return r;

	r = amdgpu_jpeg_suspend(ip_block->adev);

	return r;
}

/**
 * jpeg_v4_0_resume - resume JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init JPEG block
 */
static int jpeg_v4_0_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = amdgpu_jpeg_resume(ip_block->adev);
	if (r)
		return r;

	r = jpeg_v4_0_hw_init(ip_block);

	return r;
}

static void jpeg_v4_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data &= (~JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK);
	} else {
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	}

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
}

static void jpeg_v4_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK;
	} else {
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	}

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
}
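
/*
 * The two helpers above follow the usual amdgpu read-modify-write
 * pattern for the JPEG_CGC_* registers: read the current value, adjust
 * the mode/gate bits, write it back.  A minimal sketch of the same
 * pattern for an arbitrary field (register and field names below are
 * placeholders, not real defines):
 *
 *	data = RREG32_SOC15(JPEG, 0, regSOME_REG);
 *	data &= ~SOME_REG__SOME_FIELD_MASK;
 *	data |= value << SOME_REG__SOME_FIELD__SHIFT;
 *	WREG32_SOC15(JPEG, 0, regSOME_REG, data);
 */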

static int jpeg_v4_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		uint32_t data = 0;
		int r = 0;

		data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
		WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_PGFSM_CONFIG), data);

		r = SOC15_WAIT_ON_RREG(JPEG, 0,
			regUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);

		if (r) {
			DRM_DEV_ERROR(adev->dev, "amdgpu: JPEG disable power gating failed\n");
			return r;
		}
	}

	/* disable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	/* keep the JPEG in static PG mode */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
		~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);

	return 0;
}

static int jpeg_v4_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	/* enable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS),
		UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		uint32_t data = 0;
		int r = 0;

		data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
		WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_PGFSM_CONFIG), data);

		r = SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_PGFSM_STATUS,
			(2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);

		if (r) {
			DRM_DEV_ERROR(adev->dev, "amdgpu: JPEG enable power gating failed\n");
			return r;
		}
	}

	return 0;
}

/**
 * jpeg_v4_0_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v4_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
	int r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, true);

	/* disable power gating */
	r = jpeg_v4_0_disable_static_power_gating(adev);
	if (r)
		return r;

	/* JPEG disable CGC */
	jpeg_v4_0_disable_clock_gating(adev);

	/* MJPEG global tiling registers */
	WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);

	/* enable JMI channel */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), 0,
		~UVD_JMI_CNTL__SOFT_RESET_MASK);

	/* enable System Interrupt for JRBC */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regJPEG_SYS_INT_EN),
		JPEG_SYS_INT_EN__DJRBC_MASK,
		~JPEG_SYS_INT_EN__DJRBC_MASK);

	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_VMID, 0);
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR, 0);
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, 0);
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, 0x00000002L);
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
	ring->wptr = RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);

	return 0;
}
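
/*
 * Note on the raw UVD_JRBC_RB_CNTL values in jpeg_v4_0_start() above:
 * going by the named fields the jpeg_v2_0 code uses for the same
 * registers, 0x00000001L/0x00000002L appear to correspond to
 * RB_NO_FETCH and RB_RPTR_WR_EN, i.e. (sketch):
 *
 *	UVD_JRBC_RB_CNTL = RB_NO_FETCH | RB_RPTR_WR_EN   hold fetching
 *	... program RB base, RPTR, WPTR ...
 *	UVD_JRBC_RB_CNTL = RB_RPTR_WR_EN                 resume fetching
 *
 * UVD_JRBC_RB_SIZE is programmed as ring_size / 4, i.e. apparently in
 * dwords rather than bytes.
 */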

static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t tmp, timeout;

	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw;
	uint32_t init_status;

	struct mmsch_v4_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v4_0_cmd_end end = { {0} };
	struct mmsch_v4_0_init_header header;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	size = sizeof(struct mmsch_v4_0_init_header);
	table_loc = (uint32_t *)table->cpu_addr;
	memcpy(&header, (void *)table_loc, size);

	header.version = MMSCH_VERSION;
	header.total_size = RREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE);

	header.jpegdec.init_status = 0;
	header.jpegdec.table_offset = 0;
	header.jpegdec.table_size = 0;

	table_loc = (uint32_t *)table->cpu_addr;
	table_loc += header.total_size;

	table_size = 0;

	ring = adev->jpeg.inst->ring_dec;

	MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(JPEG, 0,
		regUVD_LMI_JRBC_RB_64BIT_BAR_LOW),
		lower_32_bits(ring->gpu_addr));
	MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(JPEG, 0,
		regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH),
		upper_32_bits(ring->gpu_addr));
	MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(JPEG, 0,
		regUVD_JRBC_RB_SIZE), ring->ring_size / 4);

	/* add end packet */
	MMSCH_V4_0_INSERT_END();

	/* refine header */
	header.jpegdec.init_status = 0;
	header.jpegdec.table_offset = header.total_size;
	header.jpegdec.table_size = table_size;
	header.total_size += table_size;

	/* Update init table header in memory */
	size = sizeof(struct mmsch_v4_0_init_header);
	table_loc = (uint32_t *)table->cpu_addr;
	memcpy((void *)table_loc, &header, size);

	/* Perform HDP flush before writing to MMSCH registers */
	amdgpu_device_flush_hdp(adev, NULL);

	/* message MMSCH (in VCN[0]) to initialize this client
	 * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
	 * of memory descriptor location
	 */
	ctx_addr = table->gpu_addr;
	WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
	WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));

	/* 2, update vmid of descriptor */
	tmp = RREG32_SOC15(VCN, 0, regMMSCH_VF_VMID);
	tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, regMMSCH_VF_VMID, tmp);

	/* 3, notify mmsch about the size of this descriptor */
	size = header.total_size;
	WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP, 0);

	/* 5, kick off the initialization and wait until
	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	param = 0x00000001;
	WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_HOST, param);
	tmp = 0;
	timeout = 1000;
	resp = 0;
	expected = MMSCH_VF_MAILBOX_RESP__OK;
	init_status = ((struct mmsch_v4_0_init_header *)(table_loc))->jpegdec.init_status;
	while (resp != expected) {
		resp = RREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP);

		if (resp != 0)
			break;
		udelay(10);
		tmp = tmp + 10;
		if (tmp >= timeout) {
			DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"
				" waiting for regMMSCH_VF_MAILBOX_RESP "
				"(expected=0x%08x, readback=0x%08x)\n",
				tmp, expected, resp);
			return -EBUSY;
		}
	}
	if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
			&& init_status != MMSCH_VF_ENGINE_STATUS__PASS) {
		DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init status for jpeg: %x\n",
			resp, init_status);
		return -EINVAL;
	}

	return 0;
}
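
/*
 * Layout of the MMSCH init table built above (sketch, offsets in
 * dwords as implied by the table_loc arithmetic):
 *
 *	+-------------------------------+  table->cpu_addr
 *	| struct mmsch_v4_0_init_header |  per-engine {init_status,
 *	|                               |   table_offset, table_size}
 *	+-------------------------------+  + header.total_size
 *	| JPEG direct-write packets     |  RB base lo/hi, RB size
 *	| MMSCH_COMMAND__END            |
 *	+-------------------------------+
 *
 * The JPEG entries are appended after whatever the VCN SR-IOV start
 * code already placed in the shared table; MMSCH consumes the table
 * after the mailbox kick and reports progress through
 * MMSCH_VF_MAILBOX_RESP and the per-engine init_status.
 */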

/**
 * jpeg_v4_0_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the JPEG block
 */
static int jpeg_v4_0_stop(struct amdgpu_device *adev)
{
	int r;

	/* reset JMI */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL),
		UVD_JMI_CNTL__SOFT_RESET_MASK,
		~UVD_JMI_CNTL__SOFT_RESET_MASK);

	jpeg_v4_0_enable_clock_gating(adev);

	/* enable power gating */
	r = jpeg_v4_0_enable_static_power_gating(adev);
	if (r)
		return r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, false);

	return 0;
}

/**
 * jpeg_v4_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t jpeg_v4_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR);
}

/**
 * jpeg_v4_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t jpeg_v4_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);
}

/**
 * jpeg_v4_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void jpeg_v4_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static bool jpeg_v4_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 1;

	ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) &
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK));

	return ret;
}

static int jpeg_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS,
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}

static int jpeg_v4_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = state == AMD_CG_STATE_GATE;

	if (enable) {
		if (!jpeg_v4_0_is_idle(handle))
			return -EBUSY;
		jpeg_v4_0_enable_clock_gating(adev);
	} else {
		jpeg_v4_0_disable_clock_gating(adev);
	}

	return 0;
}

static int jpeg_v4_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (amdgpu_sriov_vf(adev)) {
		adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == adev->jpeg.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = jpeg_v4_0_stop(adev);
	else
		ret = jpeg_v4_0_start(adev);

	if (!ret)
		adev->jpeg.cur_state = state;

	return ret;
}
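
/*
 * Power-gating transitions handled above (sketch of the two paths):
 *
 *	AMD_PG_STATE_GATE   -> jpeg_v4_0_stop():  JMI reset, CG enabled,
 *	                                          static PG enabled
 *	AMD_PG_STATE_UNGATE -> jpeg_v4_0_start(): static PG disabled,
 *	                                          CG disabled, ring programmed
 *
 * Under SR-IOV the VF never gates the block itself; power management
 * is presumably handled on the host side, so the state is pinned to
 * UNGATE.
 */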

static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     unsigned int type,
					     enum amdgpu_interrupt_state state)
{
	return 0;
}

static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: JPEG TRAP\n");

	switch (entry->src_id) {
	case VCN_4_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(adev->jpeg.inst->ring_dec);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			      entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = {
	.name = "jpeg_v4_0",
	.early_init = jpeg_v4_0_early_init,
	.sw_init = jpeg_v4_0_sw_init,
	.sw_fini = jpeg_v4_0_sw_fini,
	.hw_init = jpeg_v4_0_hw_init,
	.hw_fini = jpeg_v4_0_hw_fini,
	.suspend = jpeg_v4_0_suspend,
	.resume = jpeg_v4_0_resume,
	.is_idle = jpeg_v4_0_is_idle,
	.wait_for_idle = jpeg_v4_0_wait_for_idle,
	.set_clockgating_state = jpeg_v4_0_set_clockgating_state,
	.set_powergating_state = jpeg_v4_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.get_rptr = jpeg_v4_0_dec_ring_get_rptr,
	.get_wptr = jpeg_v4_0_dec_ring_get_wptr,
	.set_wptr = jpeg_v4_0_dec_ring_set_wptr,
	.parse_cs = jpeg_v2_dec_ring_parse_cs,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v4_0_dec_ring_emit_vm_flush */
		18 + 18 + /* jpeg_v4_0_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v4_0_dec_ring_emit_ib */
	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v2_0_dec_ring_nop,
	.insert_start = jpeg_v2_0_dec_ring_insert_start,
	.insert_end = jpeg_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.inst->ring_dec->funcs = &jpeg_v4_0_dec_ring_vm_funcs;
}

static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = {
	.process = jpeg_v4_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs jpeg_v4_0_ras_irq_funcs = {
	.set = jpeg_v4_0_set_ras_interrupt_state,
	.process = amdgpu_jpeg_process_poison_irq,
};

static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.inst->irq.num_types = 1;
	adev->jpeg.inst->irq.funcs = &jpeg_v4_0_irq_funcs;

	adev->jpeg.inst->ras_poison_irq.num_types = 1;
	adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v4_0_ras_irq_funcs;
}

const struct amdgpu_ip_block_version jpeg_v4_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_JPEG,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &jpeg_v4_0_ip_funcs,
};
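
/*
 * Illustrative sketch (simplified from the real SoC discovery code):
 * the exported jpeg_v4_0_ip_block above is added to the device's IP
 * list during discovery, along the lines of
 *
 *	amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
 *
 * after which the framework drives it through the amd_ip_funcs hooks.
 */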

static uint32_t jpeg_v4_0_query_poison_by_instance(struct amdgpu_device *adev,
		uint32_t instance, uint32_t sub_block)
{
	uint32_t poison_stat = 0, reg_value = 0;

	switch (sub_block) {
	case AMDGPU_JPEG_V4_0_JPEG0:
		reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG0_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
		break;
	case AMDGPU_JPEG_V4_0_JPEG1:
		reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG1_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
		break;
	default:
		break;
	}

	if (poison_stat)
		dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n",
			instance, sub_block);

	return poison_stat;
}

static bool jpeg_v4_0_query_ras_poison_status(struct amdgpu_device *adev)
{
	uint32_t inst = 0, sub = 0, poison_stat = 0;

	for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
		for (sub = 0; sub < AMDGPU_JPEG_V4_0_MAX_SUB_BLOCK; sub++)
			poison_stat +=
				jpeg_v4_0_query_poison_by_instance(adev, inst, sub);

	return !!poison_stat;
}

const struct amdgpu_ras_block_hw_ops jpeg_v4_0_ras_hw_ops = {
	.query_poison_status = jpeg_v4_0_query_ras_poison_status,
};

static struct amdgpu_jpeg_ras jpeg_v4_0_ras = {
	.ras_block = {
		.hw_ops = &jpeg_v4_0_ras_hw_ops,
		.ras_late_init = amdgpu_jpeg_ras_late_init,
	},
};

static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, JPEG_HWIP, 0)) {
	case IP_VERSION(4, 0, 0):
		adev->jpeg.ras = &jpeg_v4_0_ras;
		break;
	default:
		break;
	}
}
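
/*
 * Usage sketch (hypothetical caller, not part of this file): the RAS
 * core reaches the poison query through the hw_ops installed above,
 * roughly
 *
 *	if (adev->jpeg.ras &&
 *	    adev->jpeg.ras->ras_block.hw_ops->query_poison_status(adev))
 *		treat the JPEG block as having consumed poisoned data
 */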