// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2024 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v4_0_3.h"
#include "jpeg_v5_0_1.h"
#include "mmsch_v5_0.h"

#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"

static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev);
static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
					     enum amd_powergating_state state);
static void jpeg_v5_0_1_set_ras_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring);

static int amdgpu_ih_srcid_jpeg[] = {
	VCN_5_0__SRCID__JPEG_DECODE,
	VCN_5_0__SRCID__JPEG1_DECODE,
	VCN_5_0__SRCID__JPEG2_DECODE,
	VCN_5_0__SRCID__JPEG3_DECODE,
	VCN_5_0__SRCID__JPEG4_DECODE,
	VCN_5_0__SRCID__JPEG5_DECODE,
	VCN_5_0__SRCID__JPEG6_DECODE,
	VCN_5_0__SRCID__JPEG7_DECODE,
	VCN_5_0__SRCID__JPEG8_DECODE,
	VCN_5_0__SRCID__JPEG9_DECODE,
};

static const struct amdgpu_hwip_reg_entry jpeg_reg_list_5_0_1[] = {
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_POWER_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_INT_STAT),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC0_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_ADDR_MODE),
	SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG),
	SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_Y_GFX10_TILING_SURFACE),
	SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_UV_GFX10_TILING_SURFACE),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_PITCH),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_UV_PITCH),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC1_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC1_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC1_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC2_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC2_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC2_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC3_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC3_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC3_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC4_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC4_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC4_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC5_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC5_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC5_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC6_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC6_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC6_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC7_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC7_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC7_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC8_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC8_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC8_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC9_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC9_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC9_UVD_JRBC_STATUS),
};

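/*
 * Offset of a JPEG core's JRBC registers relative to the core 0 block.
 * Consecutive cores are spaced 0x40 registers apart; cores above
 * AMDGPU_MAX_JPEG_RINGS_4_0_3 are mapped in a separate register range,
 * hence the different base correction.
 */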
static int jpeg_v5_0_1_core_reg_offset(u32 pipe)
{
	if (pipe <= AMDGPU_MAX_JPEG_RINGS_4_0_3)
		return ((0x40 * pipe) - 0xc80);
	else
		return ((0x40 * pipe) - 0x440);
}

/**
 * jpeg_v5_0_1_early_init - set function pointers
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 */
static int jpeg_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (!adev->jpeg.num_jpeg_inst || adev->jpeg.num_jpeg_inst > AMDGPU_MAX_JPEG_INSTANCES)
		return -ENOENT;

	adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS;
	jpeg_v5_0_1_set_dec_ring_funcs(adev);
	jpeg_v5_0_1_set_irq_funcs(adev);
	jpeg_v5_0_1_set_ras_funcs(adev);

	return 0;
}

/**
 * jpeg_v5_0_1_sw_init - sw init for JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, j, r, jpeg_inst;

	for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
		/* JPEG TRAP */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
				      amdgpu_ih_srcid_jpeg[j], &adev->jpeg.inst->irq);
		if (r)
			return r;
	}
	/* JPEG DJPEG POISON EVENT */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			      VCN_5_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
	if (r)
		return r;

	/* JPEG EJPEG POISON EVENT */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			      VCN_5_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_inst = GET_INST(JPEG, i);

		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
			ring->use_doorbell = true;
			ring->vm_hub = AMDGPU_MMHUB0(adev->jpeg.inst[i].aid_id);
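			/*
			 * Doorbell layout: JPEG ring doorbells start right after
			 * the VCN ring0/1 pair; bare metal reserves 11 doorbells
			 * per JPEG instance starting at offset 1, SR-IOV reserves
			 * 32 per instance starting at offset 2.
			 */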
			if (!amdgpu_sriov_vf(adev)) {
				ring->doorbell_index =
					(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					1 + j + 11 * jpeg_inst;
			} else {
				ring->doorbell_index =
					(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					2 + j + 32 * jpeg_inst;
			}
			sprintf(ring->name, "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j);
			r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;

			adev->jpeg.internal.jpeg_pitch[j] =
				regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET;
			adev->jpeg.inst[i].external.jpeg_pitch[j] =
				SOC15_REG_OFFSET1(JPEG, jpeg_inst, regUVD_JRBC_SCRATCH0,
						  (j ? jpeg_v5_0_1_core_reg_offset(j) : 0));
		}
	}

	r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_5_0_1, ARRAY_SIZE(jpeg_reg_list_5_0_1));
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
		r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
		if (r)
			return r;
	}

	return 0;
}

/**
 * jpeg_v5_0_1_sw_fini - sw fini for JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * JPEG suspend and free up sw allocation
 */
static int jpeg_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_jpeg_suspend(adev);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev))
		amdgpu_jpeg_sysfs_reset_mask_fini(adev);

	r = amdgpu_jpeg_sw_fini(adev);

	return r;
}

/**
 * jpeg_v5_0_1_hw_init - start and test JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 */
static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, j, r, jpeg_inst;

	if (amdgpu_sriov_vf(adev)) {
		r = jpeg_v5_0_1_start_sriov(adev);
		if (r)
			return r;

		for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
			for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
				ring = &adev->jpeg.inst[i].ring_dec[j];
				ring->wptr = 0;
				ring->wptr_old = 0;
				jpeg_v5_0_1_dec_ring_set_wptr(ring);
				ring->sched.ready = true;
			}
		}
		return 0;
	}
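	/*
	 * VCN_RRMT_CNTL bit 0x100 reports that register routing is active;
	 * cache it as a JPEG capability for code paths that need to know.
	 */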
	if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
		adev->jpeg.caps |= AMDGPU_JPEG_CAPS(RRMT_ENABLED);

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_inst = GET_INST(JPEG, i);
		ring = adev->jpeg.inst[i].ring_dec;
		if (ring->use_doorbell)
			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 11 * jpeg_inst,
				adev->jpeg.inst[i].aid_id);

		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
			if (ring->use_doorbell)
				WREG32_SOC15_OFFSET(VCN, GET_INST(VCN, i), regVCN_JPEG_DB_CTRL,
						    ring->pipe,
						    ring->doorbell_index <<
						    VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
						    VCN_JPEG_DB_CTRL__EN_MASK);
			r = amdgpu_ring_test_helper(ring);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * jpeg_v5_0_1_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the JPEG block, mark ring as not ready any more
 */
static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret = 0;

	cancel_delayed_work_sync(&adev->jpeg.idle_work);

	if (!amdgpu_sriov_vf(adev)) {
		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
			ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
	}

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
		amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);

	return ret;
}

/**
 * jpeg_v5_0_1_suspend - suspend JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend JPEG block
 */
static int jpeg_v5_0_1_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = jpeg_v5_0_1_hw_fini(ip_block);
	if (r)
		return r;

	r = amdgpu_jpeg_suspend(adev);

	return r;
}

/**
 * jpeg_v5_0_1_resume - resume JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init JPEG block
 */
static int jpeg_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	r = jpeg_v5_0_1_hw_init(ip_block);

	return r;
}

static void jpeg_v5_0_1_init_inst(struct amdgpu_device *adev, int i)
{
	int jpeg_inst = GET_INST(JPEG, i);

	/* disable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS), 0,
		 ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	/* keep the JPEG in static PG mode */
	WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS), 0,
		 ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);

	/* MJPEG global tiling registers */
	WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);

	/* enable JMI channel */
	WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0,
		 ~UVD_JMI_CNTL__SOFT_RESET_MASK);
}

static void jpeg_v5_0_1_deinit_inst(struct amdgpu_device *adev, int i)
{
	int jpeg_inst = GET_INST(JPEG, i);

	/* reset JMI */
	WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL),
		 UVD_JMI_CNTL__SOFT_RESET_MASK,
		 ~UVD_JMI_CNTL__SOFT_RESET_MASK);

	/* enable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS),
		 UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
		 ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
}

static void jpeg_v5_0_1_init_jrbc(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 reg, data, mask;
	int jpeg_inst = GET_INST(JPEG, ring->me);
	int reg_offset = ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0;

	/* enable System Interrupt for JRBC */
	reg = SOC15_REG_OFFSET(JPEG, jpeg_inst, regJPEG_SYS_INT_EN);
	if (ring->pipe < AMDGPU_MAX_JPEG_RINGS_4_0_3) {
		data = JPEG_SYS_INT_EN__DJRBC0_MASK << ring->pipe;
		mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << ring->pipe);
		WREG32_P(reg, data, mask);
	} else {
		data = JPEG_SYS_INT_EN__DJRBC0_MASK << (ring->pipe + 12);
		mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << (ring->pipe + 12));
		WREG32_P(reg, data, mask);
	}

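	/*
	 * Program the ring buffer. The 0x1 | 0x2 value presumably sets the
	 * JRBC no-fetch and rptr-write-enable bits: fetching is held off while
	 * the base address, pointers and size are programmed, then only the
	 * rptr-write-enable bit is left set so the ring can run.
	 */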
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_LMI_JRBC_RB_VMID,
			    reg_offset, 0);
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JRBC_RB_CNTL,
			    reg_offset,
			    (0x00000001L | 0x00000002L));
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
			    reg_offset, lower_32_bits(ring->gpu_addr));
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
			    reg_offset, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JRBC_RB_RPTR,
			    reg_offset, 0);
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JRBC_RB_WPTR,
			    reg_offset, 0);
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JRBC_RB_CNTL,
			    reg_offset, 0x00000002L);
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JRBC_RB_SIZE,
			    reg_offset, ring->ring_size / 4);
	ring->wptr = RREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JRBC_RB_WPTR,
					 reg_offset);
}

static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t tmp, timeout;

	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw, item_offset;
	uint32_t init_status;
	int i, j, jpeg_inst;

	struct mmsch_v5_0_cmd_direct_write
		direct_wt = { {0} };
	struct mmsch_v5_0_cmd_end end = { {0} };
	struct mmsch_v5_0_init_header header;

	direct_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_WRITE;
	end.cmd_header.command_type =
		MMSCH_COMMAND__END;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
		jpeg_inst = GET_INST(JPEG, i);

		memset(&header, 0, sizeof(struct mmsch_v5_0_init_header));
		header.version = MMSCH_VERSION;
		header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2;

		table_loc = (uint32_t *)table->cpu_addr;
		table_loc += header.total_size;

		item_offset = header.total_size;

		for (j = 0; j < adev->jpeg.num_jpeg_rings; j++) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
			table_size = 0;

			tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW);
			MMSCH_V5_0_INSERT_DIRECT_WT(tmp, lower_32_bits(ring->gpu_addr));
			tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH);
			MMSCH_V5_0_INSERT_DIRECT_WT(tmp, upper_32_bits(ring->gpu_addr));
			tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_JRBC_RB_SIZE);
			MMSCH_V5_0_INSERT_DIRECT_WT(tmp, ring->ring_size / 4);

			if (j < 5) {
				header.mjpegdec0[j].table_offset = item_offset;
				header.mjpegdec0[j].init_status = 0;
				header.mjpegdec0[j].table_size = table_size;
			} else {
				header.mjpegdec1[j - 5].table_offset = item_offset;
				header.mjpegdec1[j - 5].init_status = 0;
				header.mjpegdec1[j - 5].table_size = table_size;
			}
			header.total_size += table_size;
			item_offset += table_size;
		}

		MMSCH_V5_0_INSERT_END();

		/* send init table to MMSCH */
		size = sizeof(struct mmsch_v5_0_init_header);
		table_loc = (uint32_t *)table->cpu_addr;
		memcpy((void *)table_loc, &header, size);

		ctx_addr = table->gpu_addr;
		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));

		tmp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID);
		tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
		tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID, tmp);

		size = header.total_size;
		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_SIZE, size);

		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP, 0);

		param = 0x00000001;
		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_HOST, param);
		tmp = 0;
		timeout = 1000;
		resp = 0;
		expected = MMSCH_VF_MAILBOX_RESP__OK;
		init_status =
			((struct mmsch_v5_0_init_header *)(table_loc))->mjpegdec0[i].init_status;
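		/*
		 * Poll the MMSCH mailbox: the response stays 0 while the
		 * firmware parses the table and should become
		 * MMSCH_VF_MAILBOX_RESP__OK on success; give up after ~1000 us.
		 */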
		while (resp != expected) {
			resp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP);

			if (resp != 0)
				break;
			udelay(10);
			tmp = tmp + 10;
			if (tmp >= timeout) {
				DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
					" waiting for regMMSCH_VF_MAILBOX_RESP "\
					"(expected=0x%08x, readback=0x%08x)\n",
					tmp, expected, resp);
				return -EBUSY;
			}
		}
		if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE &&
		    init_status != MMSCH_VF_ENGINE_STATUS__PASS)
			DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init status for jpeg: %x\n",
				  resp, init_status);

	}
	return 0;
}

/**
 * jpeg_v5_0_1_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v5_0_1_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_v5_0_1_init_inst(adev, i);
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
			jpeg_v5_0_1_init_jrbc(ring);
		}
	}

	return 0;
}

/**
 * jpeg_v5_0_1_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the JPEG block
 */
static int jpeg_v5_0_1_stop(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i)
		jpeg_v5_0_1_deinit_inst(adev, i);

	return 0;
}

/**
 * jpeg_v5_0_1_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t jpeg_v5_0_1_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC_RB_RPTR,
				   ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0);
}

/**
 * jpeg_v5_0_1_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t jpeg_v5_0_1_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];

	return RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC_RB_WPTR,
				   ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0);
}

/**
 * jpeg_v5_0_1_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me),
				    regUVD_JRBC_RB_WPTR,
				    (ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0),
				    lower_32_bits(ring->wptr));
	}
}

static bool jpeg_v5_0_1_is_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool ret = false;
	int i, j;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			int reg_offset = (j ? jpeg_v5_0_1_core_reg_offset(j) : 0);

			ret &= ((RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, i),
				regUVD_JRBC_STATUS, reg_offset) &
				UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
				UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
		}
	}

	return ret;
}

static int jpeg_v5_0_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret = 0;
	int i, j;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			int reg_offset = (j ? jpeg_v5_0_1_core_reg_offset(j) : 0);

			ret &= SOC15_WAIT_ON_RREG_OFFSET(JPEG, GET_INST(JPEG, i),
				regUVD_JRBC_STATUS, reg_offset,
				UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
				UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
		}
	}
	return ret;
}

static int jpeg_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					     enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	if (!enable)
		return 0;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (!jpeg_v5_0_1_is_idle(ip_block))
			return -EBUSY;
	}

	return 0;
}

static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
					     enum amd_powergating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret;

	if (amdgpu_sriov_vf(adev)) {
		adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == adev->jpeg.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = jpeg_v5_0_1_stop(adev);
	else
		ret = jpeg_v5_0_1_start(adev);

	if (!ret)
		adev->jpeg.cur_state = state;

	return ret;
}

static int jpeg_v5_0_1_set_interrupt_state(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   unsigned int type,
					   enum amdgpu_interrupt_state state)
{
	return 0;
}

static int jpeg_v5_0_1_set_ras_interrupt_state(struct amdgpu_device *adev,
					       struct amdgpu_irq_src *source,
					       unsigned int type,
					       enum amdgpu_interrupt_state state)
{
	return 0;
}

static int jpeg_v5_0_1_process_interrupt(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 struct amdgpu_iv_entry *entry)
{
	u32 i, inst;

	i = node_id_to_phys_map[entry->node_id];
	DRM_DEV_DEBUG(adev->dev, "IH: JPEG TRAP\n");

	for (inst = 0; inst < adev->jpeg.num_jpeg_inst; ++inst)
		if (adev->jpeg.inst[inst].aid_id == i)
			break;

	if (inst >= adev->jpeg.num_jpeg_inst) {
		dev_WARN_ONCE(adev->dev, 1,
			      "Interrupt received for unknown JPEG instance %d",
			      entry->node_id);
		return 0;
	}

	switch (entry->src_id) {
	case VCN_5_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[0]);
		break;
	case VCN_5_0__SRCID__JPEG1_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[1]);
		break;
	case VCN_5_0__SRCID__JPEG2_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[2]);
		break;
	case VCN_5_0__SRCID__JPEG3_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[3]);
		break;
	case VCN_5_0__SRCID__JPEG4_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[4]);
		break;
	case VCN_5_0__SRCID__JPEG5_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[5]);
		break;
	case VCN_5_0__SRCID__JPEG6_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[6]);
		break;
	case VCN_5_0__SRCID__JPEG7_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[7]);
		break;
	case VCN_5_0__SRCID__JPEG8_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[8]);
		break;
	case VCN_5_0__SRCID__JPEG9_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[9]);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			      entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

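/*
 * Per-core reset: stall and drain the JMI client, drop pending LMI
 * transactions, pulse this core's bit in JPEG_CORE_RST_CTRL, then clear the
 * stall/drop settings so the ring can be reinitialized.
 */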
static void jpeg_v5_0_1_core_stall_reset(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int jpeg_inst = GET_INST(JPEG, ring->me);
	int reg_offset = ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0;

	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JMI0_UVD_JMI_CLIENT_STALL,
			    reg_offset, 0x1F);
	SOC15_WAIT_ON_RREG_OFFSET(JPEG, jpeg_inst,
				  regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS,
				  reg_offset, 0x1F, 0x1F);
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JMI0_JPEG_LMI_DROP,
			    reg_offset, 0x1F);
	WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 1 << ring->pipe);
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JMI0_UVD_JMI_CLIENT_STALL,
			    reg_offset, 0x00);
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JMI0_JPEG_LMI_DROP,
			    reg_offset, 0x00);
	WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00);
}

static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
{
	if (amdgpu_sriov_vf(ring->adev))
		return -EOPNOTSUPP;

	jpeg_v5_0_1_core_stall_reset(ring);
	jpeg_v5_0_1_init_jrbc(ring);
	return amdgpu_ring_test_helper(ring);
}

static const struct amd_ip_funcs jpeg_v5_0_1_ip_funcs = {
	.name = "jpeg_v5_0_1",
	.early_init = jpeg_v5_0_1_early_init,
	.late_init = NULL,
	.sw_init = jpeg_v5_0_1_sw_init,
	.sw_fini = jpeg_v5_0_1_sw_fini,
	.hw_init = jpeg_v5_0_1_hw_init,
	.hw_fini = jpeg_v5_0_1_hw_fini,
	.suspend = jpeg_v5_0_1_suspend,
	.resume = jpeg_v5_0_1_resume,
	.is_idle = jpeg_v5_0_1_is_idle,
	.wait_for_idle = jpeg_v5_0_1_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = jpeg_v5_0_1_set_clockgating_state,
	.set_powergating_state = jpeg_v5_0_1_set_powergating_state,
	.dump_ip_state = amdgpu_jpeg_dump_ip_state,
	.print_ip_state = amdgpu_jpeg_print_ip_state,
};

static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.get_rptr = jpeg_v5_0_1_dec_ring_get_rptr,
	.get_wptr = jpeg_v5_0_1_dec_ring_get_wptr,
	.set_wptr = jpeg_v5_0_1_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v5_0_1_dec_ring_emit_vm_flush */
		22 + 22 + /* jpeg_v5_0_1_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v5_0_1_dec_ring_emit_ib */
	.emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
	.emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
	.emit_hdp_flush = jpeg_v4_0_3_ring_emit_hdp_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v4_0_3_dec_ring_nop,
	.insert_start = jpeg_v4_0_3_dec_ring_insert_start,
	.insert_end = jpeg_v4_0_3_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
	.reset = jpeg_v5_0_1_ring_reset,
};

static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i, j, jpeg_inst;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			adev->jpeg.inst[i].ring_dec[j].funcs = &jpeg_v5_0_1_dec_ring_vm_funcs;
			adev->jpeg.inst[i].ring_dec[j].me = i;
			adev->jpeg.inst[i].ring_dec[j].pipe = j;
		}
		jpeg_inst = GET_INST(JPEG, i);
		adev->jpeg.inst[i].aid_id =
			jpeg_inst / adev->jpeg.num_inst_per_aid;
	}
}

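/*
 * All JPEG instances share the irq source of the first instance;
 * jpeg_v5_0_1_set_irq_funcs() below grows its num_types by the ring count of
 * every instance, and jpeg_v5_0_1_process_interrupt() routes each trap to the
 * right instance via the node id.
 */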
static const struct amdgpu_irq_src_funcs jpeg_v5_0_1_irq_funcs = {
	.set = jpeg_v5_0_1_set_interrupt_state,
	.process = jpeg_v5_0_1_process_interrupt,
};

static const struct amdgpu_irq_src_funcs jpeg_v5_0_1_ras_irq_funcs = {
	.set = jpeg_v5_0_1_set_ras_interrupt_state,
	.process = amdgpu_jpeg_process_poison_irq,
};

static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i)
		adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings;

	adev->jpeg.inst->irq.funcs = &jpeg_v5_0_1_irq_funcs;

	adev->jpeg.inst->ras_poison_irq.num_types = 1;
	adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v5_0_1_ras_irq_funcs;
}

const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_JPEG,
	.major = 5,
	.minor = 0,
	.rev = 1,
	.funcs = &jpeg_v5_0_1_ip_funcs,
};

static uint32_t jpeg_v5_0_1_query_poison_by_instance(struct amdgpu_device *adev,
						     uint32_t instance, uint32_t sub_block)
{
	uint32_t poison_stat = 0, reg_value = 0;

	switch (sub_block) {
	case AMDGPU_JPEG_V5_0_1_JPEG0:
		reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG0_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
		break;
	case AMDGPU_JPEG_V5_0_1_JPEG1:
		reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG1_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
		break;
	default:
		break;
	}

	if (poison_stat)
		dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n",
			 instance, sub_block);

	return poison_stat;
}

static bool jpeg_v5_0_1_query_ras_poison_status(struct amdgpu_device *adev)
{
	uint32_t inst = 0, sub = 0, poison_stat = 0;

	for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
		for (sub = 0; sub < AMDGPU_JPEG_V5_0_1_MAX_SUB_BLOCK; sub++)
			poison_stat +=
				jpeg_v5_0_1_query_poison_by_instance(adev, inst, sub);

	return !!poison_stat;
}

static const struct amdgpu_ras_block_hw_ops jpeg_v5_0_1_ras_hw_ops = {
	.query_poison_status = jpeg_v5_0_1_query_ras_poison_status,
};

static int jpeg_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
				       enum aca_smu_type type, void *data)
{
	struct aca_bank_info info;
	u64 misc0;
	int ret;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	misc0 = bank->regs[ACA_REG_IDX_MISC0];
	switch (type) {
	case ACA_SMU_TYPE_UE:
		bank->aca_err_type = ACA_ERROR_TYPE_UE;
		ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
						     1ULL);
		break;
	case ACA_SMU_TYPE_CE:
		bank->aca_err_type = ACA_ERROR_TYPE_CE;
		ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
						     ACA_REG__MISC0__ERRCNT(misc0));
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

/* reference to the smu driver interface (if) header file */
static int jpeg_v5_0_1_err_codes[] = {
	16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-7][S|D] */
	24, 25, 26, 27, 28, 29, 30, 31
};

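/*
 * A bank is accepted for JPEG only when it was reported by the AID's SMU MCA
 * instance (low instance id matching mmSMNAID_AID0_MCA_SMU) and carries one
 * of the JPEG error codes listed above.
 */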
static bool jpeg_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
					  enum aca_smu_type type, void *data)
{
	u32 instlo;

	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);

	if (instlo != mmSMNAID_AID0_MCA_SMU)
		return false;

	if (aca_bank_check_error_codes(handle->adev, bank,
				       jpeg_v5_0_1_err_codes,
				       ARRAY_SIZE(jpeg_v5_0_1_err_codes)))
		return false;

	return true;
}

static const struct aca_bank_ops jpeg_v5_0_1_aca_bank_ops = {
	.aca_bank_parser = jpeg_v5_0_1_aca_bank_parser,
	.aca_bank_is_valid = jpeg_v5_0_1_aca_bank_is_valid,
};

static const struct aca_info jpeg_v5_0_1_aca_info = {
	.hwip = ACA_HWIP_TYPE_SMU,
	.mask = ACA_ERROR_UE_MASK,
	.bank_ops = &jpeg_v5_0_1_aca_bank_ops,
};

static int jpeg_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	if (amdgpu_ras_is_supported(adev, ras_block->block) &&
	    adev->jpeg.inst->ras_poison_irq.funcs) {
		r = amdgpu_irq_get(adev, &adev->jpeg.inst->ras_poison_irq, 0);
		if (r)
			goto late_fini;
	}

	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
				&jpeg_v5_0_1_aca_info, NULL);
	if (r)
		goto late_fini;

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);

	return r;
}

static struct amdgpu_jpeg_ras jpeg_v5_0_1_ras = {
	.ras_block = {
		.hw_ops = &jpeg_v5_0_1_ras_hw_ops,
		.ras_late_init = jpeg_v5_0_1_ras_late_init,
	},
};

static void jpeg_v5_0_1_set_ras_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.ras = &jpeg_v5_0_1_ras;
}