// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2024 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v4_0_3.h"
#include "jpeg_v5_0_1.h"
#include "mmsch_v5_0.h"

#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"

static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev);
static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
					     enum amd_powergating_state state);
static void jpeg_v5_0_1_set_ras_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring);

static int amdgpu_ih_srcid_jpeg[] = {
	VCN_5_0__SRCID__JPEG_DECODE,
	VCN_5_0__SRCID__JPEG1_DECODE,
	VCN_5_0__SRCID__JPEG2_DECODE,
	VCN_5_0__SRCID__JPEG3_DECODE,
	VCN_5_0__SRCID__JPEG4_DECODE,
	VCN_5_0__SRCID__JPEG5_DECODE,
	VCN_5_0__SRCID__JPEG6_DECODE,
	VCN_5_0__SRCID__JPEG7_DECODE,
	VCN_5_0__SRCID__JPEG8_DECODE,
	VCN_5_0__SRCID__JPEG9_DECODE,
};

static const struct amdgpu_hwip_reg_entry jpeg_reg_list_5_0_1[] = {
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_POWER_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_INT_STAT),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC0_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_ADDR_MODE),
	SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG),
	SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_Y_GFX10_TILING_SURFACE),
	SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_UV_GFX10_TILING_SURFACE),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_PITCH),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_UV_PITCH),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC1_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC1_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC1_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC2_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC2_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC2_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC3_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC3_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC3_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC4_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC4_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC4_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC5_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC5_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC5_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC6_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC6_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC6_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC7_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC7_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC7_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC8_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC8_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC8_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC9_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC9_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC9_UVD_JRBC_STATUS),
};
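/*
 * Per-pipe register offset helper. Every JRBC core exposes the same
 * register set at a 0x40-dword stride; the two formulas suggest the
 * cores are split across two register apertures (a reading of the
 * constants here, not taken from a register spec).
 */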
static int jpeg_v5_0_1_core_reg_offset(u32 pipe)
{
	if (pipe <= AMDGPU_MAX_JPEG_RINGS_4_0_3)
		return ((0x40 * pipe) - 0xc80);
	else
		return ((0x40 * pipe) - 0x440);
}

/**
 * jpeg_v5_0_1_early_init - set function pointers
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 */
static int jpeg_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (!adev->jpeg.num_jpeg_inst || adev->jpeg.num_jpeg_inst > AMDGPU_MAX_JPEG_INSTANCES)
		return -ENOENT;

	adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS;
	jpeg_v5_0_1_set_dec_ring_funcs(adev);
	jpeg_v5_0_1_set_irq_funcs(adev);
	jpeg_v5_0_1_set_ras_funcs(adev);

	return 0;
}

/**
 * jpeg_v5_0_1_sw_init - sw init for JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, j, r, jpeg_inst;

	for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
		/* JPEG TRAP */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
				      amdgpu_ih_srcid_jpeg[j], &adev->jpeg.inst->irq);
		if (r)
			return r;
	}
	/* JPEG DJPEG POISON EVENT */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			      VCN_5_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
	if (r)
		return r;

	/* JPEG EJPEG POISON EVENT */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			      VCN_5_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_inst = GET_INST(JPEG, i);

		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
			ring->use_doorbell = true;
			ring->vm_hub = AMDGPU_MMHUB0(adev->jpeg.inst[i].aid_id);
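			/*
			 * Doorbell layout (derived from the arithmetic
			 * below): bare metal gives each instance an
			 * 11-slot window with ring j at slot 1 + j, while
			 * SR-IOV gives each instance a 32-slot window with
			 * ring j at slot 2 + j. E.g. on bare metal, ring 0
			 * of jpeg_inst 1 sits 12 slots past the VCN
			 * doorbell base.
			 */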
			if (!amdgpu_sriov_vf(adev)) {
				ring->doorbell_index =
					(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					1 + j + 11 * jpeg_inst;
			} else {
				ring->doorbell_index =
					(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					2 + j + 32 * jpeg_inst;
			}
			sprintf(ring->name, "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j);
			r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;

			adev->jpeg.internal.jpeg_pitch[j] =
				regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET;
			adev->jpeg.inst[i].external.jpeg_pitch[j] =
				SOC15_REG_OFFSET1(JPEG, jpeg_inst, regUVD_JRBC_SCRATCH0,
						  (j ? jpeg_v5_0_1_core_reg_offset(j) : 0));
		}
	}

	r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_5_0_1, ARRAY_SIZE(jpeg_reg_list_5_0_1));
	if (r)
		return r;

	adev->jpeg.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
	if (!amdgpu_sriov_vf(adev))
		adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
	r = amdgpu_jpeg_sysfs_reset_mask_init(adev);

	return r;
}

/**
 * jpeg_v5_0_1_sw_fini - sw fini for JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * JPEG suspend and free up sw allocation
 */
static int jpeg_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_jpeg_suspend(adev);
	if (r)
		return r;

	amdgpu_jpeg_sysfs_reset_mask_fini(adev);

	r = amdgpu_jpeg_sw_fini(adev);

	return r;
}

/**
 * jpeg_v5_0_1_hw_init - start and test JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware: under SR-IOV hand ring setup to MMSCH,
 * otherwise program the doorbells and test each decode ring.
 */
static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, j, r, jpeg_inst;

	if (amdgpu_sriov_vf(adev)) {
		r = jpeg_v5_0_1_start_sriov(adev);
		if (r)
			return r;

		for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
			for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
				ring = &adev->jpeg.inst[i].ring_dec[j];
				ring->wptr = 0;
				ring->wptr_old = 0;
				jpeg_v5_0_1_dec_ring_set_wptr(ring);
				ring->sched.ready = true;
			}
		}
		return 0;
	}

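	/*
	 * Bit 8 of VCN_RRMT_CNTL is treated as "RRMT enabled" and cached
	 * as a JPEG capability for the submission paths; the meaning of
	 * the bit is an assumption based on the mask used here.
	 */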
	if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
		adev->jpeg.caps |= AMDGPU_JPEG_CAPS(RRMT_ENABLED);

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_inst = GET_INST(JPEG, i);
		ring = adev->jpeg.inst[i].ring_dec;
		if (ring->use_doorbell)
			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
							     (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
							     11 * jpeg_inst,
							     adev->jpeg.inst[i].aid_id);

		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
			if (ring->use_doorbell)
				WREG32_SOC15_OFFSET(VCN, GET_INST(VCN, i), regVCN_JPEG_DB_CTRL,
						    ring->pipe,
						    ring->doorbell_index <<
						    VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
						    VCN_JPEG_DB_CTRL__EN_MASK);
			r = amdgpu_ring_test_helper(ring);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * jpeg_v5_0_1_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the JPEG block, mark ring as not ready any more
 */
static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret = 0;

	cancel_delayed_work_sync(&adev->jpeg.idle_work);

	if (!amdgpu_sriov_vf(adev)) {
		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
			ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
	}

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
		amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);

	return ret;
}

/**
 * jpeg_v5_0_1_suspend - suspend JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend JPEG block
 */
static int jpeg_v5_0_1_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = jpeg_v5_0_1_hw_fini(ip_block);
	if (r)
		return r;

	r = amdgpu_jpeg_suspend(adev);

	return r;
}

/**
 * jpeg_v5_0_1_resume - resume JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init JPEG block
 */
static int jpeg_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	r = jpeg_v5_0_1_hw_init(ip_block);

	return r;
}

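/*
 * Per-instance power-up: release the "anti hang" hold and the JMI soft
 * reset, keep the core in static power-gating mode, and program the
 * global tiling config. The mirror-image teardown is
 * jpeg_v5_0_1_deinit_inst() below.
 */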
static void jpeg_v5_0_1_init_inst(struct amdgpu_device *adev, int i)
{
	int jpeg_inst = GET_INST(JPEG, i);

	/* disable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS), 0,
		 ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	/* keep the JPEG in static PG mode */
	WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS), 0,
		 ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);

	/* MJPEG global tiling registers */
	WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);

	/* enable JMI channel */
	WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0,
		 ~UVD_JMI_CNTL__SOFT_RESET_MASK);
}

static void jpeg_v5_0_1_deinit_inst(struct amdgpu_device *adev, int i)
{
	int jpeg_inst = GET_INST(JPEG, i);

	/* reset JMI */
	WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL),
		 UVD_JMI_CNTL__SOFT_RESET_MASK,
		 ~UVD_JMI_CNTL__SOFT_RESET_MASK);

	/* enable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS),
		 UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
		 ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
}

static void jpeg_v5_0_1_init_jrbc(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 reg, data, mask;
	int jpeg_inst = GET_INST(JPEG, ring->me);
	int reg_offset = ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0;

	/* enable System Interrupt for JRBC */
	reg = SOC15_REG_OFFSET(JPEG, jpeg_inst, regJPEG_SYS_INT_EN);
	if (ring->pipe < AMDGPU_MAX_JPEG_RINGS_4_0_3) {
		data = JPEG_SYS_INT_EN__DJRBC0_MASK << ring->pipe;
		mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << ring->pipe);
		WREG32_P(reg, data, mask);
	} else {
		data = JPEG_SYS_INT_EN__DJRBC0_MASK << (ring->pipe + 12);
		mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << (ring->pipe + 12));
		WREG32_P(reg, data, mask);
	}

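	/*
	 * Ring buffer programming: VMID 0 (kernel ring), hold the fetch
	 * engine while base/rptr/wptr are set, then release it leaving
	 * only rptr writes enabled. The 0x1 and 0x2 bits are assumed to
	 * be RB_NO_FETCH and RB_RPTR_WR_EN as on earlier JPEG revisions.
	 */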
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_LMI_JRBC_RB_VMID,
			    reg_offset, 0);
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JRBC_RB_CNTL,
			    reg_offset,
			    (0x00000001L | 0x00000002L));
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
			    reg_offset, lower_32_bits(ring->gpu_addr));
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
			    reg_offset, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JRBC_RB_RPTR,
			    reg_offset, 0);
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JRBC_RB_WPTR,
			    reg_offset, 0);
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JRBC_RB_CNTL,
			    reg_offset, 0x00000002L);
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JRBC_RB_SIZE,
			    reg_offset, ring->ring_size / 4);
	ring->wptr = RREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JRBC_RB_WPTR,
					 reg_offset);
}

static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t tmp, timeout;

	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw, item_offset;
	uint32_t init_status;
	int i, j, jpeg_inst;

	struct mmsch_v5_0_cmd_direct_write
		direct_wt = { {0} };
	struct mmsch_v5_0_cmd_end end = { {0} };
	struct mmsch_v5_0_init_header header;

	direct_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_WRITE;
	end.cmd_header.command_type =
		MMSCH_COMMAND__END;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
		jpeg_inst = GET_INST(JPEG, i);

		memset(&header, 0, sizeof(struct mmsch_v5_0_init_header));
		header.version = MMSCH_VERSION;
		header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2;

		table_loc = (uint32_t *)table->cpu_addr;
		table_loc += header.total_size;

		item_offset = header.total_size;

		for (j = 0; j < adev->jpeg.num_jpeg_rings; j++) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
			table_size = 0;

			tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW);
			MMSCH_V5_0_INSERT_DIRECT_WT(tmp, lower_32_bits(ring->gpu_addr));
			tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH);
			MMSCH_V5_0_INSERT_DIRECT_WT(tmp, upper_32_bits(ring->gpu_addr));
			tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_JRBC_RB_SIZE);
			MMSCH_V5_0_INSERT_DIRECT_WT(tmp, ring->ring_size / 4);

			if (j < 5) {
				header.mjpegdec0[j].table_offset = item_offset;
				header.mjpegdec0[j].init_status = 0;
				header.mjpegdec0[j].table_size = table_size;
			} else {
				header.mjpegdec1[j - 5].table_offset = item_offset;
				header.mjpegdec1[j - 5].init_status = 0;
				header.mjpegdec1[j - 5].table_size = table_size;
			}
			header.total_size += table_size;
			item_offset += table_size;
		}

		MMSCH_V5_0_INSERT_END();

		/* send init table to MMSCH */
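		/*
		 * Publish the table's GPU address and dword size, clear
		 * the response register, ring the host mailbox, then poll
		 * MMSCH_VF_MAILBOX_RESP every 10 us for up to 1000 us
		 * before giving up with -EBUSY.
		 */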
		size = sizeof(struct mmsch_v5_0_init_header);
		table_loc = (uint32_t *)table->cpu_addr;
		memcpy((void *)table_loc, &header, size);

		ctx_addr = table->gpu_addr;
		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));

		tmp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID);
		tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
		tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID, tmp);

		size = header.total_size;
		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_SIZE, size);

		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP, 0);

		param = 0x00000001;
		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_HOST, param);
		tmp = 0;
		timeout = 1000;
		resp = 0;
		expected = MMSCH_VF_MAILBOX_RESP__OK;
		init_status =
			((struct mmsch_v5_0_init_header *)(table_loc))->mjpegdec0[i].init_status;
		while (resp != expected) {
			resp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP);

			if (resp != 0)
				break;
			udelay(10);
			tmp = tmp + 10;
			if (tmp >= timeout) {
				DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec waiting for regMMSCH_VF_MAILBOX_RESP (expected=0x%08x, readback=0x%08x)\n",
					  tmp, expected, resp);
				return -EBUSY;
			}
		}
		if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE &&
		    init_status != MMSCH_VF_ENGINE_STATUS__PASS)
			DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init status for jpeg: %x\n",
				  resp, init_status);
	}

	return 0;
}

/**
 * jpeg_v5_0_1_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v5_0_1_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_v5_0_1_init_inst(adev, i);
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
			jpeg_v5_0_1_init_jrbc(ring);
		}
	}

	return 0;
}

/**
 * jpeg_v5_0_1_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the JPEG block
 */
static int jpeg_v5_0_1_stop(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i)
		jpeg_v5_0_1_deinit_inst(adev, i);

	return 0;
}

/**
 * jpeg_v5_0_1_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t jpeg_v5_0_1_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC_RB_RPTR,
				   ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0);
}

/**
 * jpeg_v5_0_1_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t jpeg_v5_0_1_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];

	return RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC_RB_WPTR,
				   ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0);
}

/**
 * jpeg_v5_0_1_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
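		/*
		 * Doorbell path: mirror the wptr into the writeback slot
		 * and publish it via WDOORBELL32; no MMIO register write
		 * is needed. The WREG32 branch below is the non-doorbell
		 * fallback.
		 */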
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me),
				    regUVD_JRBC_RB_WPTR,
				    (ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0),
				    lower_32_bits(ring->wptr));
	}
}

static bool jpeg_v5_0_1_is_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool ret = true;
	int i, j;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			int reg_offset = (j ? jpeg_v5_0_1_core_reg_offset(j) : 0);

			/* assume idle until a ring without RB_JOB_DONE is found */
			ret &= ((RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, i),
				regUVD_JRBC_STATUS, reg_offset) &
				UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
				UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
		}
	}

	return ret;
}

static int jpeg_v5_0_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret;
	int i, j;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			int reg_offset = (j ? jpeg_v5_0_1_core_reg_offset(j) : 0);

			/* bail out on the first ring that fails to drain */
			ret = SOC15_WAIT_ON_RREG_OFFSET(JPEG, GET_INST(JPEG, i),
							regUVD_JRBC_STATUS, reg_offset,
							UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
							UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
			if (ret)
				return ret;
		}
	}
	return 0;
}

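/*
 * No clock-gating registers are touched on this revision: ungating is
 * a no-op, and a gating request is simply refused while any JRBC still
 * has work in flight (an observation of the code below, not of the CG
 * programming spec).
 */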
static int jpeg_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					     enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	if (!enable)
		return 0;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (!jpeg_v5_0_1_is_idle(ip_block))
			return -EBUSY;
	}

	return 0;
}

static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
					     enum amd_powergating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret;

	if (amdgpu_sriov_vf(adev)) {
		adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == adev->jpeg.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = jpeg_v5_0_1_stop(adev);
	else
		ret = jpeg_v5_0_1_start(adev);

	if (!ret)
		adev->jpeg.cur_state = state;

	return ret;
}

static int jpeg_v5_0_1_set_interrupt_state(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   unsigned int type,
					   enum amdgpu_interrupt_state state)
{
	return 0;
}

static int jpeg_v5_0_1_set_ras_interrupt_state(struct amdgpu_device *adev,
					       struct amdgpu_irq_src *source,
					       unsigned int type,
					       enum amdgpu_interrupt_state state)
{
	return 0;
}

static int jpeg_v5_0_1_process_interrupt(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 struct amdgpu_iv_entry *entry)
{
	u32 i, inst;

	i = node_id_to_phys_map[entry->node_id];
	DRM_DEV_DEBUG(adev->dev, "IH: JPEG TRAP\n");

	for (inst = 0; inst < adev->jpeg.num_jpeg_inst; ++inst)
		if (adev->jpeg.inst[inst].aid_id == i)
			break;

	if (inst >= adev->jpeg.num_jpeg_inst) {
		dev_WARN_ONCE(adev->dev, 1,
			      "Interrupt received for unknown JPEG instance %d",
			      entry->node_id);
		return 0;
	}

	switch (entry->src_id) {
	case VCN_5_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[0]);
		break;
	case VCN_5_0__SRCID__JPEG1_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[1]);
		break;
	case VCN_5_0__SRCID__JPEG2_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[2]);
		break;
	case VCN_5_0__SRCID__JPEG3_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[3]);
		break;
	case VCN_5_0__SRCID__JPEG4_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[4]);
		break;
	case VCN_5_0__SRCID__JPEG5_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[5]);
		break;
	case VCN_5_0__SRCID__JPEG6_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[6]);
		break;
	case VCN_5_0__SRCID__JPEG7_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[7]);
		break;
	case VCN_5_0__SRCID__JPEG8_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[8]);
		break;
	case VCN_5_0__SRCID__JPEG9_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[9]);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			      entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

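/*
 * Per-queue reset: stall the JMI clients, wait for them to report
 * clean, drop pending LMI traffic, pulse this pipe's bit in
 * JPEG_CORE_RST_CTRL, then undo the stall/drop. 0x1F is assumed to
 * cover all JMI client lanes; the field layout is not defined in this
 * file.
 */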
static void jpeg_v5_0_1_core_stall_reset(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int jpeg_inst = GET_INST(JPEG, ring->me);
	int reg_offset = ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0;

	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JMI0_UVD_JMI_CLIENT_STALL,
			    reg_offset, 0x1F);
	SOC15_WAIT_ON_RREG_OFFSET(JPEG, jpeg_inst,
				  regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS,
				  reg_offset, 0x1F, 0x1F);
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JMI0_JPEG_LMI_DROP,
			    reg_offset, 0x1F);
	WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 1 << ring->pipe);
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JMI0_UVD_JMI_CLIENT_STALL,
			    reg_offset, 0x00);
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JMI0_JPEG_LMI_DROP,
			    reg_offset, 0x00);
	WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00);
}

static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring,
				  unsigned int vmid,
				  struct amdgpu_fence *timedout_fence)
{
	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
	jpeg_v5_0_1_core_stall_reset(ring);
	jpeg_v5_0_1_init_jrbc(ring);
	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}

static const struct amd_ip_funcs jpeg_v5_0_1_ip_funcs = {
	.name = "jpeg_v5_0_1",
	.early_init = jpeg_v5_0_1_early_init,
	.late_init = NULL,
	.sw_init = jpeg_v5_0_1_sw_init,
	.sw_fini = jpeg_v5_0_1_sw_fini,
	.hw_init = jpeg_v5_0_1_hw_init,
	.hw_fini = jpeg_v5_0_1_hw_fini,
	.suspend = jpeg_v5_0_1_suspend,
	.resume = jpeg_v5_0_1_resume,
	.is_idle = jpeg_v5_0_1_is_idle,
	.wait_for_idle = jpeg_v5_0_1_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = jpeg_v5_0_1_set_clockgating_state,
	.set_powergating_state = jpeg_v5_0_1_set_powergating_state,
	.dump_ip_state = amdgpu_jpeg_dump_ip_state,
	.print_ip_state = amdgpu_jpeg_print_ip_state,
};

static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.get_rptr = jpeg_v5_0_1_dec_ring_get_rptr,
	.get_wptr = jpeg_v5_0_1_dec_ring_get_wptr,
	.set_wptr = jpeg_v5_0_1_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v5_0_1_dec_ring_emit_vm_flush */
		22 + 22 + /* jpeg_v5_0_1_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v5_0_1_dec_ring_emit_ib */
	.emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
	.emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
	.emit_hdp_flush = jpeg_v4_0_3_ring_emit_hdp_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v4_0_3_dec_ring_nop,
	.insert_start = jpeg_v4_0_3_dec_ring_insert_start,
	.insert_end = jpeg_v4_0_3_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
	.reset = jpeg_v5_0_1_ring_reset,
};

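/*
 * Each ring records its logical instance (me) and pipe; aid_id derives
 * from the physical instance index so the doorbell and MMHUB selection
 * above land on the right AID.
 */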
static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i, j, jpeg_inst;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			adev->jpeg.inst[i].ring_dec[j].funcs = &jpeg_v5_0_1_dec_ring_vm_funcs;
			adev->jpeg.inst[i].ring_dec[j].me = i;
			adev->jpeg.inst[i].ring_dec[j].pipe = j;
		}
		jpeg_inst = GET_INST(JPEG, i);
		adev->jpeg.inst[i].aid_id =
			jpeg_inst / adev->jpeg.num_inst_per_aid;
	}
}

static const struct amdgpu_irq_src_funcs jpeg_v5_0_1_irq_funcs = {
	.set = jpeg_v5_0_1_set_interrupt_state,
	.process = jpeg_v5_0_1_process_interrupt,
};

static const struct amdgpu_irq_src_funcs jpeg_v5_0_1_ras_irq_funcs = {
	.set = jpeg_v5_0_1_set_ras_interrupt_state,
	.process = amdgpu_jpeg_process_poison_irq,
};

static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i)
		adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings;

	adev->jpeg.inst->irq.funcs = &jpeg_v5_0_1_irq_funcs;

	adev->jpeg.inst->ras_poison_irq.num_types = 1;
	adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v5_0_1_ras_irq_funcs;
}

const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_JPEG,
	.major = 5,
	.minor = 0,
	.rev = 1,
	.funcs = &jpeg_v5_0_1_ip_funcs,
};

static uint32_t jpeg_v5_0_1_query_poison_by_instance(struct amdgpu_device *adev,
						     uint32_t instance, uint32_t sub_block)
{
	uint32_t poison_stat = 0, reg_value = 0;

	switch (sub_block) {
	case AMDGPU_JPEG_V5_0_1_JPEG0:
		reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG0_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
		break;
	case AMDGPU_JPEG_V5_0_1_JPEG1:
		reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG1_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
		break;
	default:
		break;
	}

	if (poison_stat)
		dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n",
			 instance, sub_block);

	return poison_stat;
}

static bool jpeg_v5_0_1_query_ras_poison_status(struct amdgpu_device *adev)
{
	uint32_t inst = 0, sub = 0, poison_stat = 0;

	for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
		for (sub = 0; sub < AMDGPU_JPEG_V5_0_1_MAX_SUB_BLOCK; sub++)
			poison_stat +=
				jpeg_v5_0_1_query_poison_by_instance(adev, inst, sub);

	return !!poison_stat;
}

static const struct amdgpu_ras_block_hw_ops jpeg_v5_0_1_ras_hw_ops = {
	.query_poison_status = jpeg_v5_0_1_query_ras_poison_status,
};

static int jpeg_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
				       enum aca_smu_type type, void *data)
{
	struct aca_bank_info info;
	u64 misc0;
	int ret;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	misc0 = bank->regs[ACA_REG_IDX_MISC0];
	switch (type) {
	case ACA_SMU_TYPE_UE:
		bank->aca_err_type = ACA_ERROR_TYPE_UE;
		ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
						     1ULL);
		break;
	case ACA_SMU_TYPE_CE:
		bank->aca_err_type = ACA_ERROR_TYPE_CE;
		ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
						     ACA_REG__MISC0__ERRCNT(misc0));
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

/* error codes mirrored from the SMU driver interface header file */
static int jpeg_v5_0_1_err_codes[] = {
	16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-7][S|D] */
	24, 25, 26, 27, 28, 29, 30, 31
};

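/*
 * A bank counts as a JPEG bank only when its IPID instance (low word,
 * bit 0 masked off) matches the AID0 SMU MCA instance and its error
 * code appears in jpeg_v5_0_1_err_codes[]; aca_bank_check_error_codes()
 * is taken to return non-zero when the code is not in the list.
 */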
static bool jpeg_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
					  enum aca_smu_type type, void *data)
{
	u32 instlo;

	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);

	if (instlo != mmSMNAID_AID0_MCA_SMU)
		return false;

	if (aca_bank_check_error_codes(handle->adev, bank,
				       jpeg_v5_0_1_err_codes,
				       ARRAY_SIZE(jpeg_v5_0_1_err_codes)))
		return false;

	return true;
}

static const struct aca_bank_ops jpeg_v5_0_1_aca_bank_ops = {
	.aca_bank_parser = jpeg_v5_0_1_aca_bank_parser,
	.aca_bank_is_valid = jpeg_v5_0_1_aca_bank_is_valid,
};

static const struct aca_info jpeg_v5_0_1_aca_info = {
	.hwip = ACA_HWIP_TYPE_SMU,
	.mask = ACA_ERROR_UE_MASK,
	.bank_ops = &jpeg_v5_0_1_aca_bank_ops,
};

static int jpeg_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	if (amdgpu_ras_is_supported(adev, ras_block->block) &&
	    adev->jpeg.inst->ras_poison_irq.funcs) {
		r = amdgpu_irq_get(adev, &adev->jpeg.inst->ras_poison_irq, 0);
		if (r)
			goto late_fini;
	}

	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
				&jpeg_v5_0_1_aca_info, NULL);
	if (r)
		goto late_fini;

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);

	return r;
}

static struct amdgpu_jpeg_ras jpeg_v5_0_1_ras = {
	.ras_block = {
		.hw_ops = &jpeg_v5_0_1_ras_hw_ops,
		.ras_late_init = jpeg_v5_0_1_ras_late_init,
	},
};

static void jpeg_v5_0_1_set_ras_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.ras = &jpeg_v5_0_1_ras;
}