1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright(c) 2023 Intel Corporation */ 3 #include "adf_common_drv.h" 4 #include "adf_gen4_hw_data.h" 5 #include "adf_gen4_ras.h" 6 #include "adf_sysfs_ras_counters.h" 7 8 #define BITS_PER_REG(_n_) (sizeof(_n_) * BITS_PER_BYTE) 9 10 static void enable_errsou_reporting(void __iomem *csr) 11 { 12 /* Enable correctable error reporting in ERRSOU0 */ 13 ADF_CSR_WR(csr, ADF_GEN4_ERRMSK0, 0); 14 15 /* Enable uncorrectable error reporting in ERRSOU1 */ 16 ADF_CSR_WR(csr, ADF_GEN4_ERRMSK1, 0); 17 18 /* 19 * Enable uncorrectable error reporting in ERRSOU2 20 * but disable PM interrupt and CFC attention interrupt by default 21 */ 22 ADF_CSR_WR(csr, ADF_GEN4_ERRMSK2, 23 ADF_GEN4_ERRSOU2_PM_INT_BIT | 24 ADF_GEN4_ERRSOU2_CPP_CFC_ATT_INT_BITMASK); 25 26 /* 27 * Enable uncorrectable error reporting in ERRSOU3 28 * but disable RLT error interrupt and VFLR notify interrupt by default 29 */ 30 ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, 31 ADF_GEN4_ERRSOU3_RLTERROR_BIT | 32 ADF_GEN4_ERRSOU3_VFLRNOTIFY_BIT); 33 } 34 35 static void disable_errsou_reporting(void __iomem *csr) 36 { 37 u32 val = 0; 38 39 /* Disable correctable error reporting in ERRSOU0 */ 40 ADF_CSR_WR(csr, ADF_GEN4_ERRMSK0, ADF_GEN4_ERRSOU0_BIT); 41 42 /* Disable uncorrectable error reporting in ERRSOU1 */ 43 ADF_CSR_WR(csr, ADF_GEN4_ERRMSK1, ADF_GEN4_ERRSOU1_BITMASK); 44 45 /* Disable uncorrectable error reporting in ERRSOU2 */ 46 val = ADF_CSR_RD(csr, ADF_GEN4_ERRMSK2); 47 val |= ADF_GEN4_ERRSOU2_DIS_BITMASK; 48 ADF_CSR_WR(csr, ADF_GEN4_ERRMSK2, val); 49 50 /* Disable uncorrectable error reporting in ERRSOU3 */ 51 ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_ERRSOU3_BITMASK); 52 } 53 54 static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev, 55 void __iomem *csr) 56 { 57 u32 ae_mask = GET_HW_DATA(accel_dev)->ae_mask; 58 59 /* Enable Acceleration Engine correctable error reporting */ 60 ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOGENABLE_CPP0, ae_mask); 61 62 /* Enable 
Acceleration Engine uncorrectable error reporting */ 63 ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOGENABLE_CPP0, ae_mask); 64 } 65 66 static void disable_ae_error_reporting(void __iomem *csr) 67 { 68 /* Disable Acceleration Engine correctable error reporting */ 69 ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOGENABLE_CPP0, 0); 70 71 /* Disable Acceleration Engine uncorrectable error reporting */ 72 ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOGENABLE_CPP0, 0); 73 } 74 75 static void enable_cpp_error_reporting(struct adf_accel_dev *accel_dev, 76 void __iomem *csr) 77 { 78 struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); 79 80 /* Enable HI CPP Agents Command Parity Error Reporting */ 81 ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE, 82 err_mask->cppagentcmdpar_mask); 83 84 ADF_CSR_WR(csr, ADF_GEN4_CPP_CFC_ERR_CTRL, 85 ADF_GEN4_CPP_CFC_ERR_CTRL_BITMASK); 86 } 87 88 static void disable_cpp_error_reporting(void __iomem *csr) 89 { 90 /* Disable HI CPP Agents Command Parity Error Reporting */ 91 ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE, 0); 92 93 ADF_CSR_WR(csr, ADF_GEN4_CPP_CFC_ERR_CTRL, 94 ADF_GEN4_CPP_CFC_ERR_CTRL_DIS_BITMASK); 95 } 96 97 static void enable_ti_ri_error_reporting(void __iomem *csr) 98 { 99 u32 reg; 100 101 /* Enable RI Memory error reporting */ 102 ADF_CSR_WR(csr, ADF_GEN4_RI_MEM_PAR_ERR_EN0, 103 ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK | 104 ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK); 105 106 /* Enable IOSF Primary Command Parity error Reporting */ 107 ADF_CSR_WR(csr, ADF_GEN4_RIMISCCTL, ADF_GEN4_RIMISCSTS_BIT); 108 109 /* Enable TI Internal Memory Parity Error reporting */ 110 ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_ERR_MASK, 0); 111 ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_ERR_MASK, 0); 112 ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_ERR_MASK, 0); 113 ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_ERR_MASK, 0); 114 ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_ERR_MASK, 0); 115 116 /* Enable error handling in RI, TI CPP interface control registers */ 117 ADF_CSR_WR(csr, 
ADF_GEN4_RICPPINTCTL, ADF_GEN4_RICPPINTCTL_BITMASK); 118 119 ADF_CSR_WR(csr, ADF_GEN4_TICPPINTCTL, ADF_GEN4_TICPPINTCTL_BITMASK); 120 121 /* 122 * Enable error detection and reporting in TIMISCSTS 123 * with bits 1, 2 and 30 value preserved 124 */ 125 reg = ADF_CSR_RD(csr, ADF_GEN4_TIMISCCTL); 126 reg &= ADF_GEN4_TIMSCCTL_RELAY_BITMASK; 127 reg |= ADF_GEN4_TIMISCCTL_BIT; 128 ADF_CSR_WR(csr, ADF_GEN4_TIMISCCTL, reg); 129 } 130 131 static void disable_ti_ri_error_reporting(void __iomem *csr) 132 { 133 u32 reg; 134 135 /* Disable RI Memory error reporting */ 136 ADF_CSR_WR(csr, ADF_GEN4_RI_MEM_PAR_ERR_EN0, 0); 137 138 /* Disable IOSF Primary Command Parity error Reporting */ 139 ADF_CSR_WR(csr, ADF_GEN4_RIMISCCTL, 0); 140 141 /* Disable TI Internal Memory Parity Error reporting */ 142 ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_ERR_MASK, 143 ADF_GEN4_TI_CI_PAR_STS_BITMASK); 144 ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_ERR_MASK, 145 ADF_GEN4_TI_PULL0FUB_PAR_STS_BITMASK); 146 ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_ERR_MASK, 147 ADF_GEN4_TI_PUSHFUB_PAR_STS_BITMASK); 148 ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_ERR_MASK, 149 ADF_GEN4_TI_CD_PAR_STS_BITMASK); 150 ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_ERR_MASK, 151 ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK); 152 153 /* Disable error handling in RI, TI CPP interface control registers */ 154 ADF_CSR_WR(csr, ADF_GEN4_RICPPINTCTL, 0); 155 156 ADF_CSR_WR(csr, ADF_GEN4_TICPPINTCTL, 0); 157 158 /* 159 * Disable error detection and reporting in TIMISCSTS 160 * with bits 1, 2 and 30 value preserved 161 */ 162 reg = ADF_CSR_RD(csr, ADF_GEN4_TIMISCCTL); 163 reg &= ADF_GEN4_TIMSCCTL_RELAY_BITMASK; 164 ADF_CSR_WR(csr, ADF_GEN4_TIMISCCTL, reg); 165 } 166 167 static void enable_rf_error_reporting(struct adf_accel_dev *accel_dev, 168 void __iomem *csr) 169 { 170 struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); 171 172 /* Enable RF parity error in Shared RAM */ 173 ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_SRC, 0); 174 ADF_CSR_WR(csr, 
ADF_GEN4_SSMSOFTERRORPARITYMASK_ATH_CPH, 0); 175 ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_CPR_XLT, 0); 176 ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_DCPR_UCS, 0); 177 ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_PKE, 0); 178 179 if (err_mask->parerr_wat_wcp_mask) 180 ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_WAT_WCP, 0); 181 } 182 183 static void disable_rf_error_reporting(struct adf_accel_dev *accel_dev, 184 void __iomem *csr) 185 { 186 struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); 187 188 /* Disable RF Parity Error reporting in Shared RAM */ 189 ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_SRC, 190 ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT); 191 192 ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_ATH_CPH, 193 err_mask->parerr_ath_cph_mask); 194 195 ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_CPR_XLT, 196 err_mask->parerr_cpr_xlt_mask); 197 198 ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_DCPR_UCS, 199 err_mask->parerr_dcpr_ucs_mask); 200 201 ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_PKE, 202 err_mask->parerr_pke_mask); 203 204 if (err_mask->parerr_wat_wcp_mask) 205 ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_WAT_WCP, 206 err_mask->parerr_wat_wcp_mask); 207 } 208 209 static void enable_ssm_error_reporting(struct adf_accel_dev *accel_dev, 210 void __iomem *csr) 211 { 212 struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); 213 u32 val = 0; 214 215 /* Enable SSM interrupts */ 216 ADF_CSR_WR(csr, ADF_GEN4_INTMASKSSM, 0); 217 218 /* Enable shared memory error detection & correction */ 219 val = ADF_CSR_RD(csr, ADF_GEN4_SSMFEATREN); 220 val |= err_mask->ssmfeatren_mask; 221 ADF_CSR_WR(csr, ADF_GEN4_SSMFEATREN, val); 222 223 /* Enable SER detection in SER_err_ssmsh register */ 224 ADF_CSR_WR(csr, ADF_GEN4_SER_EN_SSMSH, 225 ADF_GEN4_SER_EN_SSMSH_BITMASK); 226 227 /* Enable SSM soft parity error */ 228 ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_ATH_CPH, 0); 229 ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_CPR_XLT, 0); 230 
ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_DCPR_UCS, 0); 231 ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_PKE, 0); 232 233 if (err_mask->parerr_wat_wcp_mask) 234 ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_WAT_WCP, 0); 235 236 /* Enable slice hang interrupt reporting */ 237 ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_ATH_CPH, 0); 238 ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_CPR_XLT, 0); 239 ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_DCPR_UCS, 0); 240 ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_PKE, 0); 241 242 if (err_mask->parerr_wat_wcp_mask) 243 ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_WAT_WCP, 0); 244 } 245 246 static void disable_ssm_error_reporting(struct adf_accel_dev *accel_dev, 247 void __iomem *csr) 248 { 249 struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); 250 u32 val = 0; 251 252 /* Disable SSM interrupts */ 253 ADF_CSR_WR(csr, ADF_GEN4_INTMASKSSM, 254 ADF_GEN4_INTMASKSSM_BITMASK); 255 256 /* Disable shared memory error detection & correction */ 257 val = ADF_CSR_RD(csr, ADF_GEN4_SSMFEATREN); 258 val &= ADF_GEN4_SSMFEATREN_DIS_BITMASK; 259 ADF_CSR_WR(csr, ADF_GEN4_SSMFEATREN, val); 260 261 /* Disable SER detection in SER_err_ssmsh register */ 262 ADF_CSR_WR(csr, ADF_GEN4_SER_EN_SSMSH, 0); 263 264 /* Disable SSM soft parity error */ 265 ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_ATH_CPH, 266 err_mask->parerr_ath_cph_mask); 267 268 ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_CPR_XLT, 269 err_mask->parerr_cpr_xlt_mask); 270 271 ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_DCPR_UCS, 272 err_mask->parerr_dcpr_ucs_mask); 273 274 ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_PKE, 275 err_mask->parerr_pke_mask); 276 277 if (err_mask->parerr_wat_wcp_mask) 278 ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_WAT_WCP, 279 err_mask->parerr_wat_wcp_mask); 280 281 /* Disable slice hang interrupt reporting */ 282 ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_ATH_CPH, 283 err_mask->parerr_ath_cph_mask); 284 285 ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_CPR_XLT, 286 err_mask->parerr_cpr_xlt_mask); 287 288 ADF_CSR_WR(csr, 
ADF_GEN4_SHINTMASKSSM_DCPR_UCS, 289 err_mask->parerr_dcpr_ucs_mask); 290 291 ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_PKE, 292 err_mask->parerr_pke_mask); 293 294 if (err_mask->parerr_wat_wcp_mask) 295 ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_WAT_WCP, 296 err_mask->parerr_wat_wcp_mask); 297 } 298 299 static void enable_aram_error_reporting(void __iomem *csr) 300 { 301 ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERRUERR_EN, 302 ADF_GEN4_REG_ARAMCERRUERR_EN_BITMASK); 303 304 ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR, 305 ADF_GEN4_REG_ARAMCERR_EN_BITMASK); 306 307 ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMUERR, 308 ADF_GEN4_REG_ARAMUERR_EN_BITMASK); 309 310 ADF_CSR_WR(csr, ADF_GEN4_REG_CPPMEMTGTERR, 311 ADF_GEN4_REG_CPPMEMTGTERR_EN_BITMASK); 312 } 313 314 static void disable_aram_error_reporting(void __iomem *csr) 315 { 316 ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERRUERR_EN, 0); 317 ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR, 0); 318 ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMUERR, 0); 319 ADF_CSR_WR(csr, ADF_GEN4_REG_CPPMEMTGTERR, 0); 320 } 321 322 static void adf_gen4_enable_ras(struct adf_accel_dev *accel_dev) 323 { 324 void __iomem *aram_csr = adf_get_aram_base(accel_dev); 325 void __iomem *csr = adf_get_pmisc_base(accel_dev); 326 327 enable_errsou_reporting(csr); 328 enable_ae_error_reporting(accel_dev, csr); 329 enable_cpp_error_reporting(accel_dev, csr); 330 enable_ti_ri_error_reporting(csr); 331 enable_rf_error_reporting(accel_dev, csr); 332 enable_ssm_error_reporting(accel_dev, csr); 333 enable_aram_error_reporting(aram_csr); 334 } 335 336 static void adf_gen4_disable_ras(struct adf_accel_dev *accel_dev) 337 { 338 void __iomem *aram_csr = adf_get_aram_base(accel_dev); 339 void __iomem *csr = adf_get_pmisc_base(accel_dev); 340 341 disable_errsou_reporting(csr); 342 disable_ae_error_reporting(csr); 343 disable_cpp_error_reporting(csr); 344 disable_ti_ri_error_reporting(csr); 345 disable_rf_error_reporting(accel_dev, csr); 346 disable_ssm_error_reporting(accel_dev, csr); 347 
disable_aram_error_reporting(aram_csr); 348 } 349 350 static void adf_gen4_process_errsou0(struct adf_accel_dev *accel_dev, 351 void __iomem *csr) 352 { 353 u32 aecorrerr = ADF_CSR_RD(csr, ADF_GEN4_HIAECORERRLOG_CPP0); 354 355 aecorrerr &= GET_HW_DATA(accel_dev)->ae_mask; 356 357 dev_warn(&GET_DEV(accel_dev), 358 "Correctable error detected in AE: 0x%x\n", 359 aecorrerr); 360 361 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); 362 363 /* Clear interrupt from ERRSOU0 */ 364 ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOG_CPP0, aecorrerr); 365 } 366 367 static bool adf_handle_cpp_aeunc(struct adf_accel_dev *accel_dev, 368 void __iomem *csr, u32 errsou) 369 { 370 u32 aeuncorerr; 371 372 if (!(errsou & ADF_GEN4_ERRSOU1_HIAEUNCERRLOG_CPP0_BIT)) 373 return false; 374 375 aeuncorerr = ADF_CSR_RD(csr, ADF_GEN4_HIAEUNCERRLOG_CPP0); 376 aeuncorerr &= GET_HW_DATA(accel_dev)->ae_mask; 377 378 dev_err(&GET_DEV(accel_dev), 379 "Uncorrectable error detected in AE: 0x%x\n", 380 aeuncorerr); 381 382 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 383 384 ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOG_CPP0, aeuncorerr); 385 386 return false; 387 } 388 389 static bool adf_handle_cppcmdparerr(struct adf_accel_dev *accel_dev, 390 void __iomem *csr, u32 errsou) 391 { 392 struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); 393 u32 cmdparerr; 394 395 if (!(errsou & ADF_GEN4_ERRSOU1_HICPPAGENTCMDPARERRLOG_BIT)) 396 return false; 397 398 cmdparerr = ADF_CSR_RD(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOG); 399 cmdparerr &= err_mask->cppagentcmdpar_mask; 400 401 dev_err(&GET_DEV(accel_dev), 402 "HI CPP agent command parity error: 0x%x\n", 403 cmdparerr); 404 405 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); 406 407 ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOG, cmdparerr); 408 409 return true; 410 } 411 412 static bool adf_handle_ri_mem_par_err(struct adf_accel_dev *accel_dev, 413 void __iomem *csr, u32 errsou) 414 { 415 bool reset_required = false; 416 u32 
rimem_parerr_sts; 417 418 if (!(errsou & ADF_GEN4_ERRSOU1_RIMEM_PARERR_STS_BIT)) 419 return false; 420 421 rimem_parerr_sts = ADF_CSR_RD(csr, ADF_GEN4_RIMEM_PARERR_STS); 422 rimem_parerr_sts &= ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK | 423 ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK; 424 425 if (rimem_parerr_sts & ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK) { 426 dev_err(&GET_DEV(accel_dev), 427 "RI Memory Parity uncorrectable error: 0x%x\n", 428 rimem_parerr_sts); 429 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 430 } 431 432 if (rimem_parerr_sts & ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK) { 433 dev_err(&GET_DEV(accel_dev), 434 "RI Memory Parity fatal error: 0x%x\n", 435 rimem_parerr_sts); 436 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); 437 reset_required = true; 438 } 439 440 ADF_CSR_WR(csr, ADF_GEN4_RIMEM_PARERR_STS, rimem_parerr_sts); 441 442 return reset_required; 443 } 444 445 static bool adf_handle_ti_ci_par_sts(struct adf_accel_dev *accel_dev, 446 void __iomem *csr, u32 errsou) 447 { 448 u32 ti_ci_par_sts; 449 450 if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) 451 return false; 452 453 ti_ci_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_CI_PAR_STS); 454 ti_ci_par_sts &= ADF_GEN4_TI_CI_PAR_STS_BITMASK; 455 456 if (ti_ci_par_sts) { 457 dev_err(&GET_DEV(accel_dev), 458 "TI Memory Parity Error: 0x%x\n", ti_ci_par_sts); 459 ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_STS, ti_ci_par_sts); 460 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 461 } 462 463 return false; 464 } 465 466 static bool adf_handle_ti_pullfub_par_sts(struct adf_accel_dev *accel_dev, 467 void __iomem *csr, u32 errsou) 468 { 469 u32 ti_pullfub_par_sts; 470 471 if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) 472 return false; 473 474 ti_pullfub_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_PULL0FUB_PAR_STS); 475 ti_pullfub_par_sts &= ADF_GEN4_TI_PULL0FUB_PAR_STS_BITMASK; 476 477 if (ti_pullfub_par_sts) { 478 dev_err(&GET_DEV(accel_dev), 479 "TI Pull Parity Error: 
0x%x\n", ti_pullfub_par_sts); 480 481 ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_STS, 482 ti_pullfub_par_sts); 483 484 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 485 } 486 487 return false; 488 } 489 490 static bool adf_handle_ti_pushfub_par_sts(struct adf_accel_dev *accel_dev, 491 void __iomem *csr, u32 errsou) 492 { 493 u32 ti_pushfub_par_sts; 494 495 if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) 496 return false; 497 498 ti_pushfub_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_PUSHFUB_PAR_STS); 499 ti_pushfub_par_sts &= ADF_GEN4_TI_PUSHFUB_PAR_STS_BITMASK; 500 501 if (ti_pushfub_par_sts) { 502 dev_err(&GET_DEV(accel_dev), 503 "TI Push Parity Error: 0x%x\n", ti_pushfub_par_sts); 504 505 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 506 507 ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_STS, 508 ti_pushfub_par_sts); 509 } 510 511 return false; 512 } 513 514 static bool adf_handle_ti_cd_par_sts(struct adf_accel_dev *accel_dev, 515 void __iomem *csr, u32 errsou) 516 { 517 u32 ti_cd_par_sts; 518 519 if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) 520 return false; 521 522 ti_cd_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_CD_PAR_STS); 523 ti_cd_par_sts &= ADF_GEN4_TI_CD_PAR_STS_BITMASK; 524 525 if (ti_cd_par_sts) { 526 dev_err(&GET_DEV(accel_dev), 527 "TI CD Parity Error: 0x%x\n", ti_cd_par_sts); 528 529 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 530 531 ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_STS, ti_cd_par_sts); 532 } 533 534 return false; 535 } 536 537 static bool adf_handle_ti_trnsb_par_sts(struct adf_accel_dev *accel_dev, 538 void __iomem *csr, u32 errsou) 539 { 540 u32 ti_trnsb_par_sts; 541 542 if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) 543 return false; 544 545 ti_trnsb_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_TRNSB_PAR_STS); 546 ti_trnsb_par_sts &= ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK; 547 548 if (ti_trnsb_par_sts) { 549 dev_err(&GET_DEV(accel_dev), 550 "TI TRNSB Parity Error: 0x%x\n", ti_trnsb_par_sts); 551 552 
ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 553 554 ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_STS, ti_trnsb_par_sts); 555 } 556 557 return false; 558 } 559 560 static bool adf_handle_iosfp_cmd_parerr(struct adf_accel_dev *accel_dev, 561 void __iomem *csr, u32 errsou) 562 { 563 u32 rimiscsts; 564 565 if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) 566 return false; 567 568 rimiscsts = ADF_CSR_RD(csr, ADF_GEN4_RIMISCSTS); 569 rimiscsts &= ADF_GEN4_RIMISCSTS_BIT; 570 571 dev_err(&GET_DEV(accel_dev), 572 "Command Parity error detected on IOSFP: 0x%x\n", 573 rimiscsts); 574 575 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); 576 577 ADF_CSR_WR(csr, ADF_GEN4_RIMISCSTS, rimiscsts); 578 579 return true; 580 } 581 582 static void adf_gen4_process_errsou1(struct adf_accel_dev *accel_dev, 583 void __iomem *csr, u32 errsou, 584 bool *reset_required) 585 { 586 *reset_required |= adf_handle_cpp_aeunc(accel_dev, csr, errsou); 587 *reset_required |= adf_handle_cppcmdparerr(accel_dev, csr, errsou); 588 *reset_required |= adf_handle_ri_mem_par_err(accel_dev, csr, errsou); 589 *reset_required |= adf_handle_ti_ci_par_sts(accel_dev, csr, errsou); 590 *reset_required |= adf_handle_ti_pullfub_par_sts(accel_dev, csr, errsou); 591 *reset_required |= adf_handle_ti_pushfub_par_sts(accel_dev, csr, errsou); 592 *reset_required |= adf_handle_ti_cd_par_sts(accel_dev, csr, errsou); 593 *reset_required |= adf_handle_ti_trnsb_par_sts(accel_dev, csr, errsou); 594 *reset_required |= adf_handle_iosfp_cmd_parerr(accel_dev, csr, errsou); 595 } 596 597 static bool adf_handle_uerrssmsh(struct adf_accel_dev *accel_dev, 598 void __iomem *csr, u32 iastatssm) 599 { 600 u32 reg; 601 602 if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_UERRSSMSH_BIT)) 603 return false; 604 605 reg = ADF_CSR_RD(csr, ADF_GEN4_UERRSSMSH); 606 reg &= ADF_GEN4_UERRSSMSH_BITMASK; 607 608 dev_err(&GET_DEV(accel_dev), 609 "Uncorrectable error on ssm shared memory: 0x%x\n", 610 reg); 611 612 
ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 613 614 ADF_CSR_WR(csr, ADF_GEN4_UERRSSMSH, reg); 615 616 return false; 617 } 618 619 static bool adf_handle_cerrssmsh(struct adf_accel_dev *accel_dev, 620 void __iomem *csr, u32 iastatssm) 621 { 622 u32 reg; 623 624 if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_CERRSSMSH_BIT)) 625 return false; 626 627 reg = ADF_CSR_RD(csr, ADF_GEN4_CERRSSMSH); 628 reg &= ADF_GEN4_CERRSSMSH_ERROR_BIT; 629 630 dev_warn(&GET_DEV(accel_dev), 631 "Correctable error on ssm shared memory: 0x%x\n", 632 reg); 633 634 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); 635 636 ADF_CSR_WR(csr, ADF_GEN4_CERRSSMSH, reg); 637 638 return false; 639 } 640 641 static bool adf_handle_pperr_err(struct adf_accel_dev *accel_dev, 642 void __iomem *csr, u32 iastatssm) 643 { 644 u32 reg; 645 646 if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_PPERR_BIT)) 647 return false; 648 649 reg = ADF_CSR_RD(csr, ADF_GEN4_PPERR); 650 reg &= ADF_GEN4_PPERR_BITMASK; 651 652 dev_err(&GET_DEV(accel_dev), 653 "Uncorrectable error CPP transaction on memory target: 0x%x\n", 654 reg); 655 656 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 657 658 ADF_CSR_WR(csr, ADF_GEN4_PPERR, reg); 659 660 return false; 661 } 662 663 static void adf_poll_slicehang_csr(struct adf_accel_dev *accel_dev, 664 void __iomem *csr, u32 slice_hang_offset, 665 char *slice_name) 666 { 667 u32 slice_hang_reg = ADF_CSR_RD(csr, slice_hang_offset); 668 669 if (!slice_hang_reg) 670 return; 671 672 dev_err(&GET_DEV(accel_dev), 673 "Slice %s hang error encountered\n", slice_name); 674 675 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 676 } 677 678 static bool adf_handle_slice_hang_error(struct adf_accel_dev *accel_dev, 679 void __iomem *csr, u32 iastatssm) 680 { 681 struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); 682 683 if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SLICEHANG_ERR_BIT)) 684 return false; 685 686 adf_poll_slicehang_csr(accel_dev, csr, 687 
ADF_GEN4_SLICEHANGSTATUS_ATH_CPH, "ath_cph"); 688 adf_poll_slicehang_csr(accel_dev, csr, 689 ADF_GEN4_SLICEHANGSTATUS_CPR_XLT, "cpr_xlt"); 690 adf_poll_slicehang_csr(accel_dev, csr, 691 ADF_GEN4_SLICEHANGSTATUS_DCPR_UCS, "dcpr_ucs"); 692 adf_poll_slicehang_csr(accel_dev, csr, 693 ADF_GEN4_SLICEHANGSTATUS_PKE, "pke"); 694 695 if (err_mask->parerr_wat_wcp_mask) 696 adf_poll_slicehang_csr(accel_dev, csr, 697 ADF_GEN4_SLICEHANGSTATUS_WAT_WCP, 698 "ath_cph"); 699 700 return false; 701 } 702 703 static bool adf_handle_spp_pullcmd_err(struct adf_accel_dev *accel_dev, 704 void __iomem *csr) 705 { 706 struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); 707 bool reset_required = false; 708 u32 reg; 709 710 reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_ATH_CPH); 711 reg &= err_mask->parerr_ath_cph_mask; 712 if (reg) { 713 dev_err(&GET_DEV(accel_dev), 714 "SPP pull command fatal error ATH_CPH: 0x%x\n", reg); 715 716 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); 717 718 ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_ATH_CPH, reg); 719 720 reset_required = true; 721 } 722 723 reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_CPR_XLT); 724 reg &= err_mask->parerr_cpr_xlt_mask; 725 if (reg) { 726 dev_err(&GET_DEV(accel_dev), 727 "SPP pull command fatal error CPR_XLT: 0x%x\n", reg); 728 729 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); 730 731 ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_CPR_XLT, reg); 732 733 reset_required = true; 734 } 735 736 reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_DCPR_UCS); 737 reg &= err_mask->parerr_dcpr_ucs_mask; 738 if (reg) { 739 dev_err(&GET_DEV(accel_dev), 740 "SPP pull command fatal error DCPR_UCS: 0x%x\n", reg); 741 742 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); 743 744 ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_DCPR_UCS, reg); 745 746 reset_required = true; 747 } 748 749 reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_PKE); 750 reg &= err_mask->parerr_pke_mask; 751 if (reg) { 752 
dev_err(&GET_DEV(accel_dev), 753 "SPP pull command fatal error PKE: 0x%x\n", reg); 754 755 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); 756 757 ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_PKE, reg); 758 759 reset_required = true; 760 } 761 762 if (err_mask->parerr_wat_wcp_mask) { 763 reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_WAT_WCP); 764 reg &= err_mask->parerr_wat_wcp_mask; 765 if (reg) { 766 dev_err(&GET_DEV(accel_dev), 767 "SPP pull command fatal error WAT_WCP: 0x%x\n", reg); 768 769 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); 770 771 ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_WAT_WCP, reg); 772 773 reset_required = true; 774 } 775 } 776 777 return reset_required; 778 } 779 780 static bool adf_handle_spp_pulldata_err(struct adf_accel_dev *accel_dev, 781 void __iomem *csr) 782 { 783 struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); 784 u32 reg; 785 786 reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_ATH_CPH); 787 reg &= err_mask->parerr_ath_cph_mask; 788 if (reg) { 789 dev_err(&GET_DEV(accel_dev), 790 "SPP pull data err ATH_CPH: 0x%x\n", reg); 791 792 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 793 794 ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_ATH_CPH, reg); 795 } 796 797 reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_CPR_XLT); 798 reg &= err_mask->parerr_cpr_xlt_mask; 799 if (reg) { 800 dev_err(&GET_DEV(accel_dev), 801 "SPP pull data err CPR_XLT: 0x%x\n", reg); 802 803 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 804 805 ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_CPR_XLT, reg); 806 } 807 808 reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_DCPR_UCS); 809 reg &= err_mask->parerr_dcpr_ucs_mask; 810 if (reg) { 811 dev_err(&GET_DEV(accel_dev), 812 "SPP pull data err DCPR_UCS: 0x%x\n", reg); 813 814 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 815 816 ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_DCPR_UCS, reg); 817 } 818 819 reg = ADF_CSR_RD(csr, 
ADF_GEN4_SPPPULLDATAPARERR_PKE); 820 reg &= err_mask->parerr_pke_mask; 821 if (reg) { 822 dev_err(&GET_DEV(accel_dev), 823 "SPP pull data err PKE: 0x%x\n", reg); 824 825 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 826 827 ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_PKE, reg); 828 } 829 830 if (err_mask->parerr_wat_wcp_mask) { 831 reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_WAT_WCP); 832 reg &= err_mask->parerr_wat_wcp_mask; 833 if (reg) { 834 dev_err(&GET_DEV(accel_dev), 835 "SPP pull data err WAT_WCP: 0x%x\n", reg); 836 837 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 838 839 ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_WAT_WCP, reg); 840 } 841 } 842 843 return false; 844 } 845 846 static bool adf_handle_spp_pushcmd_err(struct adf_accel_dev *accel_dev, 847 void __iomem *csr) 848 { 849 struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); 850 bool reset_required = false; 851 u32 reg; 852 853 reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_ATH_CPH); 854 reg &= err_mask->parerr_ath_cph_mask; 855 if (reg) { 856 dev_err(&GET_DEV(accel_dev), 857 "SPP push command fatal error ATH_CPH: 0x%x\n", reg); 858 859 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); 860 861 ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_ATH_CPH, reg); 862 863 reset_required = true; 864 } 865 866 reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_CPR_XLT); 867 reg &= err_mask->parerr_cpr_xlt_mask; 868 if (reg) { 869 dev_err(&GET_DEV(accel_dev), 870 "SPP push command fatal error CPR_XLT: 0x%x\n", reg); 871 872 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); 873 874 ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_CPR_XLT, reg); 875 876 reset_required = true; 877 } 878 879 reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_DCPR_UCS); 880 reg &= err_mask->parerr_dcpr_ucs_mask; 881 if (reg) { 882 dev_err(&GET_DEV(accel_dev), 883 "SPP push command fatal error DCPR_UCS: 0x%x\n", reg); 884 885 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); 886 887 
ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_DCPR_UCS, reg); 888 889 reset_required = true; 890 } 891 892 reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_PKE); 893 reg &= err_mask->parerr_pke_mask; 894 if (reg) { 895 dev_err(&GET_DEV(accel_dev), 896 "SPP push command fatal error PKE: 0x%x\n", 897 reg); 898 899 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); 900 901 ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_PKE, reg); 902 903 reset_required = true; 904 } 905 906 if (err_mask->parerr_wat_wcp_mask) { 907 reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_WAT_WCP); 908 reg &= err_mask->parerr_wat_wcp_mask; 909 if (reg) { 910 dev_err(&GET_DEV(accel_dev), 911 "SPP push command fatal error WAT_WCP: 0x%x\n", reg); 912 913 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); 914 915 ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_WAT_WCP, reg); 916 917 reset_required = true; 918 } 919 } 920 921 return reset_required; 922 } 923 924 static bool adf_handle_spp_pushdata_err(struct adf_accel_dev *accel_dev, 925 void __iomem *csr) 926 { 927 struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); 928 u32 reg; 929 930 reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_ATH_CPH); 931 reg &= err_mask->parerr_ath_cph_mask; 932 if (reg) { 933 dev_err(&GET_DEV(accel_dev), 934 "SPP push data err ATH_CPH: 0x%x\n", reg); 935 936 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 937 938 ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_ATH_CPH, reg); 939 } 940 941 reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_CPR_XLT); 942 reg &= err_mask->parerr_cpr_xlt_mask; 943 if (reg) { 944 dev_err(&GET_DEV(accel_dev), 945 "SPP push data err CPR_XLT: 0x%x\n", reg); 946 947 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 948 949 ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_CPR_XLT, reg); 950 } 951 952 reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_DCPR_UCS); 953 reg &= err_mask->parerr_dcpr_ucs_mask; 954 if (reg) { 955 dev_err(&GET_DEV(accel_dev), 956 "SPP push data err 
DCPR_UCS: 0x%x\n", reg); 957 958 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 959 960 ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_DCPR_UCS, reg); 961 } 962 963 reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_PKE); 964 reg &= err_mask->parerr_pke_mask; 965 if (reg) { 966 dev_err(&GET_DEV(accel_dev), 967 "SPP push data err PKE: 0x%x\n", reg); 968 969 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 970 971 ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_PKE, reg); 972 } 973 974 if (err_mask->parerr_wat_wcp_mask) { 975 reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_WAT_WCP); 976 reg &= err_mask->parerr_wat_wcp_mask; 977 if (reg) { 978 dev_err(&GET_DEV(accel_dev), 979 "SPP push data err WAT_WCP: 0x%x\n", reg); 980 981 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 982 983 ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_WAT_WCP, 984 reg); 985 } 986 } 987 988 return false; 989 } 990 991 static bool adf_handle_spppar_err(struct adf_accel_dev *accel_dev, 992 void __iomem *csr, u32 iastatssm) 993 { 994 bool reset_required; 995 996 if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SPPPARERR_BIT)) 997 return false; 998 999 reset_required = adf_handle_spp_pullcmd_err(accel_dev, csr); 1000 reset_required |= adf_handle_spp_pulldata_err(accel_dev, csr); 1001 reset_required |= adf_handle_spp_pushcmd_err(accel_dev, csr); 1002 reset_required |= adf_handle_spp_pushdata_err(accel_dev, csr); 1003 1004 return reset_required; 1005 } 1006 1007 static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev, 1008 void __iomem *csr, u32 iastatssm) 1009 { 1010 u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SSMCPPERR); 1011 u32 bits_num = BITS_PER_REG(reg); 1012 bool reset_required = false; 1013 unsigned long errs_bits; 1014 u32 bit_iterator; 1015 1016 if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMCPPERR_BIT)) 1017 return false; 1018 1019 reg = ADF_CSR_RD(csr, ADF_GEN4_SSMCPPERR); 1020 reg &= ADF_GEN4_SSMCPPERR_FATAL_BITMASK | ADF_GEN4_SSMCPPERR_UNCERR_BITMASK; 1021 if (reg & 
ADF_GEN4_SSMCPPERR_FATAL_BITMASK) { 1022 dev_err(&GET_DEV(accel_dev), 1023 "Fatal SSM CPP parity error: 0x%x\n", reg); 1024 1025 errs_bits = reg & ADF_GEN4_SSMCPPERR_FATAL_BITMASK; 1026 for_each_set_bit(bit_iterator, &errs_bits, bits_num) { 1027 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); 1028 } 1029 reset_required = true; 1030 } 1031 1032 if (reg & ADF_GEN4_SSMCPPERR_UNCERR_BITMASK) { 1033 dev_err(&GET_DEV(accel_dev), 1034 "non-Fatal SSM CPP parity error: 0x%x\n", reg); 1035 errs_bits = reg & ADF_GEN4_SSMCPPERR_UNCERR_BITMASK; 1036 1037 for_each_set_bit(bit_iterator, &errs_bits, bits_num) { 1038 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 1039 } 1040 } 1041 1042 ADF_CSR_WR(csr, ADF_GEN4_SSMCPPERR, reg); 1043 1044 return reset_required; 1045 } 1046 1047 static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev, 1048 void __iomem *csr, u32 iastatssm) 1049 { 1050 struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); 1051 u32 reg; 1052 1053 if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT)) 1054 return false; 1055 1056 reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC); 1057 reg &= ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT; 1058 if (reg) { 1059 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 1060 ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC, reg); 1061 } 1062 1063 reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH); 1064 reg &= err_mask->parerr_ath_cph_mask; 1065 if (reg) { 1066 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 1067 ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH, reg); 1068 } 1069 1070 reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT); 1071 reg &= err_mask->parerr_cpr_xlt_mask; 1072 if (reg) { 1073 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 1074 ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT, reg); 1075 } 1076 1077 reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS); 1078 reg &= err_mask->parerr_dcpr_ucs_mask; 1079 if 
(reg) { 1080 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 1081 ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS, reg); 1082 } 1083 1084 reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE); 1085 reg &= err_mask->parerr_pke_mask; 1086 if (reg) { 1087 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 1088 ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE, reg); 1089 } 1090 1091 if (err_mask->parerr_wat_wcp_mask) { 1092 reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP); 1093 reg &= err_mask->parerr_wat_wcp_mask; 1094 if (reg) { 1095 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 1096 ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP, 1097 reg); 1098 } 1099 } 1100 1101 dev_err(&GET_DEV(accel_dev), "Slice ssm soft parity error reported"); 1102 1103 return false; 1104 } 1105 1106 static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev, 1107 void __iomem *csr, u32 iastatssm) 1108 { 1109 u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SER_ERR_SSMSH); 1110 u32 bits_num = BITS_PER_REG(reg); 1111 bool reset_required = false; 1112 unsigned long errs_bits; 1113 u32 bit_iterator; 1114 1115 if (!(iastatssm & (ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_CERR_BIT | 1116 ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_UNCERR_BIT))) 1117 return false; 1118 1119 reg = ADF_CSR_RD(csr, ADF_GEN4_SER_ERR_SSMSH); 1120 reg &= ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK | 1121 ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK | 1122 ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK; 1123 if (reg & ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK) { 1124 dev_err(&GET_DEV(accel_dev), 1125 "Fatal SER_SSMSH_ERR: 0x%x\n", reg); 1126 1127 errs_bits = reg & ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK; 1128 for_each_set_bit(bit_iterator, &errs_bits, bits_num) { 1129 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); 1130 } 1131 1132 reset_required = true; 1133 } 1134 1135 if (reg & ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK) { 1136 dev_err(&GET_DEV(accel_dev), 1137 "non-fatal SER_SSMSH_ERR: 0x%x\n", reg); 1138 1139 
errs_bits = reg & ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK; 1140 for_each_set_bit(bit_iterator, &errs_bits, bits_num) { 1141 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); 1142 } 1143 } 1144 1145 if (reg & ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK) { 1146 dev_warn(&GET_DEV(accel_dev), 1147 "Correctable SER_SSMSH_ERR: 0x%x\n", reg); 1148 1149 errs_bits = reg & ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK; 1150 for_each_set_bit(bit_iterator, &errs_bits, bits_num) { 1151 ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); 1152 } 1153 } 1154 1155 ADF_CSR_WR(csr, ADF_GEN4_SER_ERR_SSMSH, reg); 1156 1157 return reset_required; 1158 } 1159 1160 static bool adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev, 1161 void __iomem *csr) 1162 { 1163 u32 iastatssm = ADF_CSR_RD(csr, ADF_GEN4_IAINTSTATSSM); 1164 bool reset_required; 1165 1166 iastatssm &= ADF_GEN4_IAINTSTATSSM_BITMASK; 1167 if (!iastatssm) 1168 return false; 1169 1170 reset_required = adf_handle_uerrssmsh(accel_dev, csr, iastatssm); 1171 reset_required |= adf_handle_cerrssmsh(accel_dev, csr, iastatssm); 1172 reset_required |= adf_handle_pperr_err(accel_dev, csr, iastatssm); 1173 reset_required |= adf_handle_slice_hang_error(accel_dev, csr, iastatssm); 1174 reset_required |= adf_handle_spppar_err(accel_dev, csr, iastatssm); 1175 reset_required |= adf_handle_ssmcpppar_err(accel_dev, csr, iastatssm); 1176 reset_required |= adf_handle_rf_parr_err(accel_dev, csr, iastatssm); 1177 reset_required |= adf_handle_ser_err_ssmsh(accel_dev, csr, iastatssm); 1178 1179 ADF_CSR_WR(csr, ADF_GEN4_IAINTSTATSSM, iastatssm); 1180 1181 return reset_required; 1182 } 1183 1184 static bool adf_handle_exprpssmcmpr(struct adf_accel_dev *accel_dev, 1185 void __iomem *csr) 1186 { 1187 u32 reg = ADF_CSR_RD(csr, ADF_GEN4_EXPRPSSMCPR); 1188 1189 reg &= ADF_GEN4_EXPRPSSMCPR_UNCERR_BITMASK; 1190 if (!reg) 1191 return false; 1192 1193 dev_err(&GET_DEV(accel_dev), 1194 "Uncorrectable error exception in SSM CMP: 0x%x", reg); 1195 1196 
	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

	/* Clear the handled exception bits by writing them back */
	ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMCPR, reg);

	return false;
}

/*
 * Handle correctable and uncorrectable exceptions from the SSM
 * translator (XLT) exception register. Counts each class in the RAS
 * sysfs counters and clears the handled bits. Never requests a reset.
 */
static bool adf_handle_exprpssmxlt(struct adf_accel_dev *accel_dev,
				   void __iomem *csr)
{
	u32 reg = ADF_CSR_RD(csr, ADF_GEN4_EXPRPSSMXLT);

	reg &= ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK |
	       ADF_GEN4_EXPRPSSMXLT_CERR_BIT;
	if (!reg)
		return false;

	if (reg & ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK) {
		dev_err(&GET_DEV(accel_dev),
			"Uncorrectable error exception in SSM XLT: 0x%x", reg);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
	}

	if (reg & ADF_GEN4_EXPRPSSMXLT_CERR_BIT) {
		dev_warn(&GET_DEV(accel_dev),
			 "Correctable error exception in SSM XLT: 0x%x", reg);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
	}

	/* Clear the handled exception bits by writing them back */
	ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMXLT, reg);

	return false;
}

/*
 * Handle per-slice exceptions from the SSM decompression (DCMP)
 * exception registers, one register per DCPR slice. Never requests a
 * reset.
 */
static bool adf_handle_exprpssmdcpr(struct adf_accel_dev *accel_dev,
				    void __iomem *csr)
{
	u32 reg;
	int i;

	for (i = 0; i < ADF_GEN4_DCPR_SLICES_NUM; i++) {
		reg = ADF_CSR_RD(csr, ADF_GEN4_EXPRPSSMDCPR(i));
		reg &= ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK |
		       ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK;
		if (!reg)
			continue;

		if (reg & ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK) {
			dev_err(&GET_DEV(accel_dev),
				"Uncorrectable error exception in SSM DCMP: 0x%x", reg);

			ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
		}

		if (reg & ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK) {
			dev_warn(&GET_DEV(accel_dev),
				 "Correctable error exception in SSM DCMP: 0x%x", reg);

			ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
		}

		/* Clear this slice's handled exception bits */
		ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMDCPR(i), reg);
	}

	return false;
}

/*
 * Top-level SSM error handler for ERRSOU2: runs the IAINTSTATSSM
 * dispatcher and the CMP/XLT/DCMP exception handlers. Returns true if
 * any of them requests a device reset.
 */
static bool adf_handle_ssm(struct adf_accel_dev *accel_dev, void __iomem *csr,
			   u32 errsou)
{
	bool reset_required;

	if (!(errsou & ADF_GEN4_ERRSOU2_SSM_ERR_BIT))
		return false;

	reset_required = adf_handle_iaintstatssm(accel_dev, csr);
	reset_required |= adf_handle_exprpssmcmpr(accel_dev, csr);
	reset_required |= adf_handle_exprpssmxlt(accel_dev, csr);
	reset_required |= adf_handle_exprpssmdcpr(accel_dev, csr);

	return reset_required;
}

/*
 * Handle CPP Command Fabric Controller errors flagged in ERRSOU2.
 * Data parity errors are uncorrectable; command parity and
 * multiple-error conditions are fatal and request a device reset.
 * All status bits are cleared through the dedicated CLR register.
 */
static bool adf_handle_cpp_cfc_err(struct adf_accel_dev *accel_dev,
				   void __iomem *csr, u32 errsou)
{
	bool reset_required = false;
	u32 reg;

	if (!(errsou & ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT))
		return false;

	reg = ADF_CSR_RD(csr, ADF_GEN4_CPP_CFC_ERR_STATUS);
	if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_DATAPAR_BIT) {
		dev_err(&GET_DEV(accel_dev),
			"CPP_CFC_ERR: data parity: 0x%x", reg);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
	}

	if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_CMDPAR_BIT) {
		dev_err(&GET_DEV(accel_dev),
			"CPP_CFC_ERR: command parity: 0x%x", reg);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

		reset_required = true;
	}

	if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_MERR_BIT) {
		dev_err(&GET_DEV(accel_dev),
			"CPP_CFC_ERR: multiple errors: 0x%x", reg);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

		reset_required = true;
	}

	ADF_CSR_WR(csr, ADF_GEN4_CPP_CFC_ERR_STATUS_CLR,
		   ADF_GEN4_CPP_CFC_ERR_STATUS_CLR_BITMASK);

	return reset_required;
}

/* Process all ERRSOU2 error sources, accumulating the reset request. */
static void adf_gen4_process_errsou2(struct adf_accel_dev *accel_dev,
				     void __iomem *csr, u32 errsou,
				     bool *reset_required)
{
	*reset_required |= adf_handle_ssm(accel_dev, csr, errsou);
	*reset_required |= adf_handle_cpp_cfc_err(accel_dev, csr, errsou);
}

/*
 * Handle fatal Transmit Interface miscellaneous errors flagged in
 * ERRSOU3. Always requests a device reset when the source bit is set.
 */
static bool adf_handle_timiscsts(struct adf_accel_dev *accel_dev,
				 void __iomem *csr, u32 errsou)
{
	u32 timiscsts;

	if (!(errsou & ADF_GEN4_ERRSOU3_TIMISCSTS_BIT))
		return false;

	timiscsts = ADF_CSR_RD(csr, ADF_GEN4_TIMISCSTS);

	dev_err(&GET_DEV(accel_dev),
		"Fatal error in Transmit Interface: 0x%x\n", timiscsts);

	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

	/* TI misc errors are fatal - always request a device reset */
	return true;
}

/*
 * Handle RI CPP interface uncorrectable errors flagged in ERRSOU3.
 * Counts one uncorrectable error, clears the handled status bits and
 * never requests a reset.
 */
static bool adf_handle_ricppintsts(struct adf_accel_dev *accel_dev,
				   void __iomem *csr, u32 errsou)
{
	u32 ricppintsts;

	if (!(errsou & ADF_GEN4_ERRSOU3_RICPPINTSTS_BITMASK))
		return false;

	ricppintsts = ADF_CSR_RD(csr, ADF_GEN4_RICPPINTSTS);
	ricppintsts &= ADF_GEN4_RICPPINTSTS_BITMASK;

	dev_err(&GET_DEV(accel_dev),
		"RI CPP Uncorrectable Error: 0x%x\n", ricppintsts);

	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

	ADF_CSR_WR(csr, ADF_GEN4_RICPPINTSTS, ricppintsts);

	return false;
}

/*
 * Handle TI CPP interface uncorrectable errors flagged in ERRSOU3.
 * Mirrors adf_handle_ricppintsts() for the transmit side.
 */
static bool adf_handle_ticppintsts(struct adf_accel_dev *accel_dev,
				   void __iomem *csr, u32 errsou)
{
	u32 ticppintsts;

	if (!(errsou & ADF_GEN4_ERRSOU3_TICPPINTSTS_BITMASK))
		return false;

	ticppintsts = ADF_CSR_RD(csr, ADF_GEN4_TICPPINTSTS);
	ticppintsts &= ADF_GEN4_TICPPINTSTS_BITMASK;

	dev_err(&GET_DEV(accel_dev),
		"TI CPP Uncorrectable Error: 0x%x\n", ticppintsts);

	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

	ADF_CSR_WR(csr, ADF_GEN4_TICPPINTSTS, ticppintsts);

	return false;
}

/*
 * Handle ARAM correctable errors flagged in ERRSOU3. Uses the ARAM CSR
 * space (aram_csr is passed in as 'csr' by the caller). Clears the
 * error while keeping correction/reporting enabled via the EN bitmask.
 */
static bool adf_handle_aramcerr(struct adf_accel_dev *accel_dev,
				void __iomem *csr, u32 errsou)
{
	u32 aram_cerr;

	if (!(errsou & ADF_GEN4_ERRSOU3_REG_ARAMCERR_BIT))
		return false;

	aram_cerr = ADF_CSR_RD(csr, ADF_GEN4_REG_ARAMCERR);
	aram_cerr &= ADF_GEN4_REG_ARAMCERR_BIT;

	dev_warn(&GET_DEV(accel_dev),
		 "ARAM correctable error : 0x%x\n", aram_cerr);

	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);

	/* Keep error correction/reporting enabled while clearing */
	aram_cerr |= ADF_GEN4_REG_ARAMCERR_EN_BITMASK;

	ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR, aram_cerr);

	return false;
}

/*
 * Handle ARAM uncorrectable errors flagged in ERRSOU3. A multiple-error
 * condition is treated as fatal and requests a device reset; a single
 * error is counted as uncorrectable only.
 */
static bool adf_handle_aramuerr(struct adf_accel_dev *accel_dev,
				void __iomem *csr, u32 errsou)
{
	bool reset_required = false;
	u32 aramuerr;

	if (!(errsou & ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT))
		return false;

	aramuerr = ADF_CSR_RD(csr, ADF_GEN4_REG_ARAMUERR);
	aramuerr &= ADF_GEN4_REG_ARAMUERR_ERROR_BIT |
		    ADF_GEN4_REG_ARAMUERR_MULTI_ERRORS_BIT;

	if (!aramuerr)
		return false;

	if (aramuerr & ADF_GEN4_REG_ARAMUERR_MULTI_ERRORS_BIT) {
		dev_err(&GET_DEV(accel_dev),
			"ARAM multiple uncorrectable errors: 0x%x\n", aramuerr);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

		reset_required = true;
	} else {
		dev_err(&GET_DEV(accel_dev),
			"ARAM uncorrectable error: 0x%x\n", aramuerr);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
	}

	/* Keep error reporting enabled while clearing */
	aramuerr |= ADF_GEN4_REG_ARAMUERR_EN_BITMASK;

	ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMUERR, aramuerr);

	return reset_required;
}

/*
 * Handle misc memory target (CPPMEMTGTERR) errors. Note this shares
 * the ARAMUERR source bit in ERRSOU3, matching the gate used by
 * adf_handle_aramuerr(). Multiple errors are fatal; a single error is
 * uncorrectable.
 */
static bool adf_handle_reg_cppmemtgterr(struct adf_accel_dev *accel_dev,
					void __iomem *csr, u32 errsou)
{
	bool reset_required = false;
	u32 cppmemtgterr;

	if (!(errsou & ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT))
		return false;

	cppmemtgterr = ADF_CSR_RD(csr, ADF_GEN4_REG_CPPMEMTGTERR);
	cppmemtgterr &= ADF_GEN4_REG_CPPMEMTGTERR_BITMASK |
			ADF_GEN4_REG_CPPMEMTGTERR_MULTI_ERRORS_BIT;
	if (!cppmemtgterr)
		return false;

	if (cppmemtgterr & ADF_GEN4_REG_CPPMEMTGTERR_MULTI_ERRORS_BIT) {
		dev_err(&GET_DEV(accel_dev),
			"Misc memory target multiple uncorrectable errors: 0x%x\n",
			cppmemtgterr);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

		reset_required = true;
	} else {
		dev_err(&GET_DEV(accel_dev),
			"Misc memory target uncorrectable error: 0x%x\n", cppmemtgterr);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
	}

	/* Keep error reporting enabled while clearing */
	cppmemtgterr |= ADF_GEN4_REG_CPPMEMTGTERR_EN_BITMASK;

	ADF_CSR_WR(csr, ADF_GEN4_REG_CPPMEMTGTERR, cppmemtgterr);

	return reset_required;
}

/*
 * Handle ATU faults, one status register per ring pair (bank). Each
 * detected fault is counted as uncorrectable and cleared per ring pair.
 * Never requests a reset.
 */
static bool adf_handle_atufaultstatus(struct adf_accel_dev *accel_dev,
				      void __iomem *csr, u32 errsou)
{
	u32 i;
	u32 max_rp_num = GET_HW_DATA(accel_dev)->num_banks;

	if (!(errsou & ADF_GEN4_ERRSOU3_ATUFAULTSTATUS_BIT))
		return false;

	for (i = 0; i < max_rp_num; i++) {
		u32 atufaultstatus = ADF_CSR_RD(csr, ADF_GEN4_ATUFAULTSTATUS(i));

		atufaultstatus &= ADF_GEN4_ATUFAULTSTATUS_BIT;

		if (atufaultstatus) {
			dev_err(&GET_DEV(accel_dev),
				"Ring Pair (%u) ATU detected fault: 0x%x\n", i,
				atufaultstatus);

			ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

			ADF_CSR_WR(csr, ADF_GEN4_ATUFAULTSTATUS(i), atufaultstatus);
		}
	}

	return false;
}

/*
 * Process all ERRSOU3 error sources. ARAM-related handlers use the
 * ARAM CSR base; the rest use the PMISC base.
 */
static void adf_gen4_process_errsou3(struct adf_accel_dev *accel_dev,
				     void __iomem *csr, void __iomem *aram_csr,
				     u32 errsou, bool *reset_required)
{
	*reset_required |= adf_handle_timiscsts(accel_dev, csr, errsou);
	*reset_required |= adf_handle_ricppintsts(accel_dev, csr, errsou);
	*reset_required |= adf_handle_ticppintsts(accel_dev, csr, errsou);
	*reset_required |= adf_handle_aramcerr(accel_dev, aram_csr, errsou);
	*reset_required |= adf_handle_aramuerr(accel_dev, aram_csr, errsou);
	*reset_required |= adf_handle_reg_cppmemtgterr(accel_dev, aram_csr, errsou);
	*reset_required |= adf_handle_atufaultstatus(accel_dev, csr, errsou);
}

/*
 * Top-level RAS interrupt handler: checks each ERRSOU0-3 summary
 * register in turn and dispatches to the per-source processors.
 * Returns true if any error source was handled; *reset_required is set
 * if any handler requests a device reset.
 */
static bool adf_gen4_handle_interrupt(struct adf_accel_dev *accel_dev,
				      bool *reset_required)
{
	void __iomem *aram_csr = adf_get_aram_base(accel_dev);
	void __iomem *csr = adf_get_pmisc_base(accel_dev);
	u32 errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU0);
	bool handled = false;

	*reset_required = false;

	if (errsou & ADF_GEN4_ERRSOU0_BIT) {
		adf_gen4_process_errsou0(accel_dev, csr);
		handled = true;
	}

	errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU1);
	if (errsou & ADF_GEN4_ERRSOU1_BITMASK) {
		adf_gen4_process_errsou1(accel_dev, csr, errsou, reset_required);
		handled = true;
	}

	errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU2);
	if (errsou & ADF_GEN4_ERRSOU2_BITMASK) {
		adf_gen4_process_errsou2(accel_dev, csr, errsou, reset_required);
		handled = true;
	}

	errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU3);
	if (errsou & ADF_GEN4_ERRSOU3_BITMASK) {
		adf_gen4_process_errsou3(accel_dev, csr, aram_csr, errsou, reset_required);
		handled = true;
	}

	return handled;
}

/* Populate the GEN4 RAS operations used by the common driver core. */
void adf_gen4_init_ras_ops(struct adf_ras_ops *ras_ops)
{
	ras_ops->enable_ras_errors = adf_gen4_enable_ras;
	ras_ops->disable_ras_errors = adf_gen4_disable_ras;
	ras_ops->handle_interrupt = adf_gen4_handle_interrupt;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_ras_ops);