// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */
#include "adf_common_drv.h"
#include "adf_gen4_hw_data.h"
#include "adf_gen4_ras.h"
#include "adf_sysfs_ras_counters.h"

#define BITS_PER_REG(_n_) (sizeof(_n_) * BITS_PER_BYTE)

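/*
 * RAS reporting on GEN4 devices is steered by four error source registers,
 * ERRSOU0..ERRSOU3, each paired with an ERRMSK register of the same index.
 * Writing 0 to a bit in ERRMSKx unmasks the corresponding error source,
 * writing 1 masks it. ERRSOU0 carries correctable errors; ERRSOU1..3 carry
 * uncorrectable and fatal ones.
 */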
static void enable_errsou_reporting(void __iomem *csr)
{
	/* Enable correctable error reporting in ERRSOU0 */
	ADF_CSR_WR(csr, ADF_GEN4_ERRMSK0, 0);

	/* Enable uncorrectable error reporting in ERRSOU1 */
	ADF_CSR_WR(csr, ADF_GEN4_ERRMSK1, 0);

	/*
	 * Enable uncorrectable error reporting in ERRSOU2
	 * but disable PM interrupt and CFC attention interrupt by default
	 */
	ADF_CSR_WR(csr, ADF_GEN4_ERRMSK2,
		   ADF_GEN4_ERRSOU2_PM_INT_BIT |
		   ADF_GEN4_ERRSOU2_CPP_CFC_ATT_INT_BITMASK);

	/*
	 * Enable uncorrectable error reporting in ERRSOU3
	 * but disable RLT error interrupt and VFLR notify interrupt by default
	 */
	ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3,
		   ADF_GEN4_ERRSOU3_RLTERROR_BIT |
		   ADF_GEN4_ERRSOU3_VFLRNOTIFY_BIT);
}

static void disable_errsou_reporting(void __iomem *csr)
{
	u32 val = 0;

	/* Disable correctable error reporting in ERRSOU0 */
	ADF_CSR_WR(csr, ADF_GEN4_ERRMSK0, ADF_GEN4_ERRSOU0_BIT);

	/* Disable uncorrectable error reporting in ERRSOU1 */
	ADF_CSR_WR(csr, ADF_GEN4_ERRMSK1, ADF_GEN4_ERRSOU1_BITMASK);

	/* Disable uncorrectable error reporting in ERRSOU2 */
	val = ADF_CSR_RD(csr, ADF_GEN4_ERRMSK2);
	val |= ADF_GEN4_ERRSOU2_DIS_BITMASK;
	ADF_CSR_WR(csr, ADF_GEN4_ERRMSK2, val);

	/* Disable uncorrectable error reporting in ERRSOU3 */
	ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_ERRSOU3_BITMASK);
}

static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev,
				      void __iomem *csr)
{
	u32 ae_mask = GET_HW_DATA(accel_dev)->ae_mask;

	/* Enable Acceleration Engine correctable error reporting */
	ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOGENABLE_CPP0, ae_mask);

	/* Enable Acceleration Engine uncorrectable error reporting */
	ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOGENABLE_CPP0, ae_mask);
}

static void disable_ae_error_reporting(void __iomem *csr)
{
	/* Disable Acceleration Engine correctable error reporting */
	ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOGENABLE_CPP0, 0);

	/* Disable Acceleration Engine uncorrectable error reporting */
	ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOGENABLE_CPP0, 0);
}

static void enable_cpp_error_reporting(struct adf_accel_dev *accel_dev,
				       void __iomem *csr)
{
	struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);

	/* Enable HI CPP Agents Command Parity Error Reporting */
	ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE,
		   err_mask->cppagentcmdpar_mask);

	ADF_CSR_WR(csr, ADF_GEN4_CPP_CFC_ERR_CTRL,
		   ADF_GEN4_CPP_CFC_ERR_CTRL_BITMASK);
}

static void disable_cpp_error_reporting(void __iomem *csr)
{
	/* Disable HI CPP Agents Command Parity Error Reporting */
	ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE, 0);

	ADF_CSR_WR(csr, ADF_GEN4_CPP_CFC_ERR_CTRL,
		   ADF_GEN4_CPP_CFC_ERR_CTRL_DIS_BITMASK);
}

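/*
 * Enable parity error reporting for the RI memories and the TI internal
 * memories, plus error handling on the RI and TI CPP interfaces.
 * TIMISCCTL is updated read-modify-write so that the relay bits (1, 2
 * and 30) keep their current value.
 */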
static void enable_ti_ri_error_reporting(void __iomem *csr)
{
	u32 reg;

	/* Enable RI Memory error reporting */
	ADF_CSR_WR(csr, ADF_GEN4_RI_MEM_PAR_ERR_EN0,
		   ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK |
		   ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK);

	/* Enable IOSF Primary Command Parity error Reporting */
	ADF_CSR_WR(csr, ADF_GEN4_RIMISCCTL, ADF_GEN4_RIMISCSTS_BIT);

	/* Enable TI Internal Memory Parity Error reporting */
	ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_ERR_MASK, 0);
	ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_ERR_MASK, 0);
	ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_ERR_MASK, 0);
	ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_ERR_MASK, 0);
	ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_ERR_MASK, 0);

	/* Enable error handling in RI, TI CPP interface control registers */
	ADF_CSR_WR(csr, ADF_GEN4_RICPPINTCTL, ADF_GEN4_RICPPINTCTL_BITMASK);

	ADF_CSR_WR(csr, ADF_GEN4_TICPPINTCTL, ADF_GEN4_TICPPINTCTL_BITMASK);

	/*
	 * Enable error detection and reporting in TIMISCSTS
	 * with bits 1, 2 and 30 value preserved
	 */
	reg = ADF_CSR_RD(csr, ADF_GEN4_TIMISCCTL);
	reg &= ADF_GEN4_TIMSCCTL_RELAY_BITMASK;
	reg |= ADF_GEN4_TIMISCCTL_BIT;
	ADF_CSR_WR(csr, ADF_GEN4_TIMISCCTL, reg);
}

static void disable_ti_ri_error_reporting(void __iomem *csr)
{
	u32 reg;

	/* Disable RI Memory error reporting */
	ADF_CSR_WR(csr, ADF_GEN4_RI_MEM_PAR_ERR_EN0, 0);

	/* Disable IOSF Primary Command Parity error Reporting */
	ADF_CSR_WR(csr, ADF_GEN4_RIMISCCTL, 0);

	/* Disable TI Internal Memory Parity Error reporting */
	ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_ERR_MASK,
		   ADF_GEN4_TI_CI_PAR_STS_BITMASK);
	ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_ERR_MASK,
		   ADF_GEN4_TI_PULL0FUB_PAR_STS_BITMASK);
	ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_ERR_MASK,
		   ADF_GEN4_TI_PUSHFUB_PAR_STS_BITMASK);
	ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_ERR_MASK,
		   ADF_GEN4_TI_CD_PAR_STS_BITMASK);
	ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_ERR_MASK,
		   ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK);

	/* Disable error handling in RI, TI CPP interface control registers */
	ADF_CSR_WR(csr, ADF_GEN4_RICPPINTCTL, 0);

	ADF_CSR_WR(csr, ADF_GEN4_TICPPINTCTL, 0);

	/*
	 * Disable error detection and reporting in TIMISCSTS
	 * with bits 1, 2 and 30 value preserved
	 */
	reg = ADF_CSR_RD(csr, ADF_GEN4_TIMISCCTL);
	reg &= ADF_GEN4_TIMSCCTL_RELAY_BITMASK;
	ADF_CSR_WR(csr, ADF_GEN4_TIMISCCTL, reg);
}

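/*
 * Note: the WAT/WCP slices are not present on every GEN4 SKU. A zero
 * err_mask->parerr_wat_wcp_mask is used throughout this file as the
 * indication that those slices, and their CSRs, are absent, so the
 * corresponding accesses are skipped.
 */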
static void enable_rf_error_reporting(struct adf_accel_dev *accel_dev,
				      void __iomem *csr)
{
	struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);

	/* Enable RF parity error in Shared RAM */
	ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_SRC, 0);
	ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_ATH_CPH, 0);
	ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_CPR_XLT, 0);
	ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_DCPR_UCS, 0);
	ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_PKE, 0);

	if (err_mask->parerr_wat_wcp_mask)
		ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_WAT_WCP, 0);
}

static void disable_rf_error_reporting(struct adf_accel_dev *accel_dev,
				       void __iomem *csr)
{
	struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);

	/* Disable RF Parity Error reporting in Shared RAM */
	ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_SRC,
		   ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT);

	ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_ATH_CPH,
		   err_mask->parerr_ath_cph_mask);

	ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_CPR_XLT,
		   err_mask->parerr_cpr_xlt_mask);

	ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_DCPR_UCS,
		   err_mask->parerr_dcpr_ucs_mask);

	ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_PKE,
		   err_mask->parerr_pke_mask);

	if (err_mask->parerr_wat_wcp_mask)
		ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_WAT_WCP,
			   err_mask->parerr_wat_wcp_mask);
}

static void enable_ssm_error_reporting(struct adf_accel_dev *accel_dev,
				       void __iomem *csr)
{
	struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
	u32 val = 0;

	/* Enable SSM interrupts */
	ADF_CSR_WR(csr, ADF_GEN4_INTMASKSSM, 0);

	/* Enable shared memory error detection & correction */
	val = ADF_CSR_RD(csr, ADF_GEN4_SSMFEATREN);
	val |= err_mask->ssmfeatren_mask;
	ADF_CSR_WR(csr, ADF_GEN4_SSMFEATREN, val);

	/* Enable SER detection in SER_err_ssmsh register */
	ADF_CSR_WR(csr, ADF_GEN4_SER_EN_SSMSH,
		   ADF_GEN4_SER_EN_SSMSH_BITMASK);

	/* Enable SSM soft parity error */
	ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_ATH_CPH, 0);
	ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_CPR_XLT, 0);
	ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_DCPR_UCS, 0);
	ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_PKE, 0);

	if (err_mask->parerr_wat_wcp_mask)
		ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_WAT_WCP, 0);

	/* Enable slice hang interrupt reporting */
	ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_ATH_CPH, 0);
	ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_CPR_XLT, 0);
	ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_DCPR_UCS, 0);
	ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_PKE, 0);

	if (err_mask->parerr_wat_wcp_mask)
		ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_WAT_WCP, 0);
}

static void disable_ssm_error_reporting(struct adf_accel_dev *accel_dev,
					void __iomem *csr)
{
	struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
	u32 val = 0;

	/* Disable SSM interrupts */
	ADF_CSR_WR(csr, ADF_GEN4_INTMASKSSM,
		   ADF_GEN4_INTMASKSSM_BITMASK);

	/* Disable shared memory error detection & correction */
	val = ADF_CSR_RD(csr, ADF_GEN4_SSMFEATREN);
	val &= ADF_GEN4_SSMFEATREN_DIS_BITMASK;
	ADF_CSR_WR(csr, ADF_GEN4_SSMFEATREN, val);

	/* Disable SER detection in SER_err_ssmsh register */
	ADF_CSR_WR(csr, ADF_GEN4_SER_EN_SSMSH, 0);

	/* Disable SSM soft parity error */
	ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_ATH_CPH,
		   err_mask->parerr_ath_cph_mask);

	ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_CPR_XLT,
		   err_mask->parerr_cpr_xlt_mask);

	ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_DCPR_UCS,
		   err_mask->parerr_dcpr_ucs_mask);

	ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_PKE,
		   err_mask->parerr_pke_mask);

	if (err_mask->parerr_wat_wcp_mask)
		ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_WAT_WCP,
			   err_mask->parerr_wat_wcp_mask);

	/* Disable slice hang interrupt reporting */
	ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_ATH_CPH,
		   err_mask->parerr_ath_cph_mask);

	ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_CPR_XLT,
		   err_mask->parerr_cpr_xlt_mask);

	ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_DCPR_UCS,
		   err_mask->parerr_dcpr_ucs_mask);

	ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_PKE,
		   err_mask->parerr_pke_mask);

	if (err_mask->parerr_wat_wcp_mask)
		ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_WAT_WCP,
			   err_mask->parerr_wat_wcp_mask);
}

static void enable_aram_error_reporting(void __iomem *csr)
{
	ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERRUERR_EN,
		   ADF_GEN4_REG_ARAMCERRUERR_EN_BITMASK);

	ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR,
		   ADF_GEN4_REG_ARAMCERR_EN_BITMASK);

	ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMUERR,
		   ADF_GEN4_REG_ARAMUERR_EN_BITMASK);

	ADF_CSR_WR(csr, ADF_GEN4_REG_CPPMEMTGTERR,
		   ADF_GEN4_REG_CPPMEMTGTERR_EN_BITMASK);
}

static void disable_aram_error_reporting(void __iomem *csr)
{
	ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERRUERR_EN, 0);
	ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR, 0);
	ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMUERR, 0);
	ADF_CSR_WR(csr, ADF_GEN4_REG_CPPMEMTGTERR, 0);
}

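/*
 * adf_ras_ops hook that unmasks and enables every RAS error source.
 * The ARAM error CSRs are reached through the ARAM base rather than
 * the PMISC base, hence the two separate MMIO pointers.
 */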
static void adf_gen4_enable_ras(struct adf_accel_dev *accel_dev)
{
	void __iomem *aram_csr = adf_get_aram_base(accel_dev);
	void __iomem *csr = adf_get_pmisc_base(accel_dev);

	enable_errsou_reporting(csr);
	enable_ae_error_reporting(accel_dev, csr);
	enable_cpp_error_reporting(accel_dev, csr);
	enable_ti_ri_error_reporting(csr);
	enable_rf_error_reporting(accel_dev, csr);
	enable_ssm_error_reporting(accel_dev, csr);
	enable_aram_error_reporting(aram_csr);
}

static void adf_gen4_disable_ras(struct adf_accel_dev *accel_dev)
{
	void __iomem *aram_csr = adf_get_aram_base(accel_dev);
	void __iomem *csr = adf_get_pmisc_base(accel_dev);

	disable_errsou_reporting(csr);
	disable_ae_error_reporting(csr);
	disable_cpp_error_reporting(csr);
	disable_ti_ri_error_reporting(csr);
	disable_rf_error_reporting(accel_dev, csr);
	disable_ssm_error_reporting(accel_dev, csr);
	disable_aram_error_reporting(aram_csr);
}

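/*
 * Each adf_handle_*() routine below checks its own source bit in the
 * errsou value it is given, logs the error, bumps the RAS counters
 * exposed through sysfs and clears the status. The boolean return value
 * reports whether a device reset is required: true for fatal errors,
 * false for errors the device can survive.
 */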
static void adf_gen4_process_errsou0(struct adf_accel_dev *accel_dev,
				     void __iomem *csr)
{
	u32 aecorrerr = ADF_CSR_RD(csr, ADF_GEN4_HIAECORERRLOG_CPP0);

	aecorrerr &= GET_HW_DATA(accel_dev)->ae_mask;

	dev_warn(&GET_DEV(accel_dev),
		 "Correctable error detected in AE: 0x%x\n",
		 aecorrerr);

	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);

	/* Clear interrupt from ERRSOU0 */
	ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOG_CPP0, aecorrerr);
}

static bool adf_handle_cpp_aeunc(struct adf_accel_dev *accel_dev,
				 void __iomem *csr, u32 errsou)
{
	u32 aeuncorerr;

	if (!(errsou & ADF_GEN4_ERRSOU1_HIAEUNCERRLOG_CPP0_BIT))
		return false;

	aeuncorerr = ADF_CSR_RD(csr, ADF_GEN4_HIAEUNCERRLOG_CPP0);
	aeuncorerr &= GET_HW_DATA(accel_dev)->ae_mask;

	dev_err(&GET_DEV(accel_dev),
		"Uncorrectable error detected in AE: 0x%x\n",
		aeuncorerr);

	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

	ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOG_CPP0, aeuncorerr);

	return false;
}

static bool adf_handle_cppcmdparerr(struct adf_accel_dev *accel_dev,
				    void __iomem *csr, u32 errsou)
{
	struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
	u32 cmdparerr;

	if (!(errsou & ADF_GEN4_ERRSOU1_HICPPAGENTCMDPARERRLOG_BIT))
		return false;

	cmdparerr = ADF_CSR_RD(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOG);
	cmdparerr &= err_mask->cppagentcmdpar_mask;

	dev_err(&GET_DEV(accel_dev),
		"HI CPP agent command parity error: 0x%x\n",
		cmdparerr);

	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

	ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOG, cmdparerr);

	return true;
}

static bool adf_handle_ri_mem_par_err(struct adf_accel_dev *accel_dev,
				      void __iomem *csr, u32 errsou)
{
	bool reset_required = false;
	u32 rimem_parerr_sts;

	if (!(errsou & ADF_GEN4_ERRSOU1_RIMEM_PARERR_STS_BIT))
		return false;

	rimem_parerr_sts = ADF_CSR_RD(csr, ADF_GEN4_RIMEM_PARERR_STS);
	rimem_parerr_sts &= ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK |
			    ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK;

	if (rimem_parerr_sts & ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK) {
		dev_err(&GET_DEV(accel_dev),
			"RI Memory Parity uncorrectable error: 0x%x\n",
			rimem_parerr_sts);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
	}

	if (rimem_parerr_sts & ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK) {
		dev_err(&GET_DEV(accel_dev),
			"RI Memory Parity fatal error: 0x%x\n",
			rimem_parerr_sts);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
		reset_required = true;
	}

	ADF_CSR_WR(csr, ADF_GEN4_RIMEM_PARERR_STS, rimem_parerr_sts);

	return reset_required;
}

static bool adf_handle_ti_ci_par_sts(struct adf_accel_dev *accel_dev,
				     void __iomem *csr, u32 errsou)
{
	u32 ti_ci_par_sts;

	if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT))
		return false;

	ti_ci_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_CI_PAR_STS);
	ti_ci_par_sts &= ADF_GEN4_TI_CI_PAR_STS_BITMASK;

	if (ti_ci_par_sts) {
		dev_err(&GET_DEV(accel_dev),
			"TI Memory Parity Error: 0x%x\n", ti_ci_par_sts);
		ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_STS, ti_ci_par_sts);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
	}

	return false;
}

static bool adf_handle_ti_pullfub_par_sts(struct adf_accel_dev *accel_dev,
					  void __iomem *csr, u32 errsou)
{
	u32 ti_pullfub_par_sts;

	if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT))
		return false;

	ti_pullfub_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_PULL0FUB_PAR_STS);
	ti_pullfub_par_sts &= ADF_GEN4_TI_PULL0FUB_PAR_STS_BITMASK;

	if (ti_pullfub_par_sts) {
		dev_err(&GET_DEV(accel_dev),
			"TI Pull Parity Error: 0x%x\n", ti_pullfub_par_sts);

		ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_STS,
			   ti_pullfub_par_sts);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
	}

	return false;
}

static bool adf_handle_ti_pushfub_par_sts(struct adf_accel_dev *accel_dev,
					  void __iomem *csr, u32 errsou)
{
	u32 ti_pushfub_par_sts;

	if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT))
		return false;

	ti_pushfub_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_PUSHFUB_PAR_STS);
	ti_pushfub_par_sts &= ADF_GEN4_TI_PUSHFUB_PAR_STS_BITMASK;

	if (ti_pushfub_par_sts) {
		dev_err(&GET_DEV(accel_dev),
			"TI Push Parity Error: 0x%x\n", ti_pushfub_par_sts);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

		ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_STS,
			   ti_pushfub_par_sts);
	}

	return false;
}

static bool adf_handle_ti_cd_par_sts(struct adf_accel_dev *accel_dev,
				     void __iomem *csr, u32 errsou)
{
	u32 ti_cd_par_sts;

	if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT))
		return false;

	ti_cd_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_CD_PAR_STS);
	ti_cd_par_sts &= ADF_GEN4_TI_CD_PAR_STS_BITMASK;

	if (ti_cd_par_sts) {
		dev_err(&GET_DEV(accel_dev),
			"TI CD Parity Error: 0x%x\n", ti_cd_par_sts);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

		ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_STS, ti_cd_par_sts);
	}

	return false;
}

static bool adf_handle_ti_trnsb_par_sts(struct adf_accel_dev *accel_dev,
					void __iomem *csr, u32 errsou)
{
	u32 ti_trnsb_par_sts;

	if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT))
		return false;

	ti_trnsb_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_TRNSB_PAR_STS);
	ti_trnsb_par_sts &= ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK;

	if (ti_trnsb_par_sts) {
		dev_err(&GET_DEV(accel_dev),
			"TI TRNSB Parity Error: 0x%x\n", ti_trnsb_par_sts);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

		ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_STS, ti_trnsb_par_sts);
	}

	return false;
}

static bool adf_handle_iosfp_cmd_parerr(struct adf_accel_dev *accel_dev,
					void __iomem *csr, u32 errsou)
{
	u32 rimiscsts;

	if (!(errsou & ADF_GEN4_ERRSOU1_RIMISCSTS_BIT))
		return false;

	rimiscsts = ADF_CSR_RD(csr, ADF_GEN4_RIMISCSTS);
	rimiscsts &= ADF_GEN4_RIMISCSTS_BIT;

	dev_err(&GET_DEV(accel_dev),
		"Command Parity error detected on IOSFP: 0x%x\n",
		rimiscsts);

	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

	ADF_CSR_WR(csr, ADF_GEN4_RIMISCSTS, rimiscsts);

	return true;
}

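/*
 * ERRSOU1 aggregates the AE uncorrectable, CPP command parity, RI/TI
 * memory parity and IOSF primary command parity sources. Since every
 * handler re-checks its own source bit, they can be called back to back
 * and simply OR their reset verdicts together.
 */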
static void adf_gen4_process_errsou1(struct adf_accel_dev *accel_dev,
				     void __iomem *csr, u32 errsou,
				     bool *reset_required)
{
	*reset_required |= adf_handle_cpp_aeunc(accel_dev, csr, errsou);
	*reset_required |= adf_handle_cppcmdparerr(accel_dev, csr, errsou);
	*reset_required |= adf_handle_ri_mem_par_err(accel_dev, csr, errsou);
	*reset_required |= adf_handle_ti_ci_par_sts(accel_dev, csr, errsou);
	*reset_required |= adf_handle_ti_pullfub_par_sts(accel_dev, csr, errsou);
	*reset_required |= adf_handle_ti_pushfub_par_sts(accel_dev, csr, errsou);
	*reset_required |= adf_handle_ti_cd_par_sts(accel_dev, csr, errsou);
	*reset_required |= adf_handle_ti_trnsb_par_sts(accel_dev, csr, errsou);
	*reset_required |= adf_handle_iosfp_cmd_parerr(accel_dev, csr, errsou);
}

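/*
 * The SSM status registers handled below are write-1-to-clear: each
 * handler masks the status down to the bits it owns, logs the error,
 * increments the matching RAS counter and writes the masked value back
 * to clear it.
 */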
static bool adf_handle_uerrssmsh(struct adf_accel_dev *accel_dev,
				 void __iomem *csr, u32 iastatssm)
{
	u32 reg;

	if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_UERRSSMSH_BIT))
		return false;

	reg = ADF_CSR_RD(csr, ADF_GEN4_UERRSSMSH);
	reg &= ADF_GEN4_UERRSSMSH_BITMASK;

	dev_err(&GET_DEV(accel_dev),
		"Uncorrectable error on ssm shared memory: 0x%x\n",
		reg);

	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

	ADF_CSR_WR(csr, ADF_GEN4_UERRSSMSH, reg);

	return false;
}

static bool adf_handle_cerrssmsh(struct adf_accel_dev *accel_dev,
				 void __iomem *csr, u32 iastatssm)
{
	u32 reg;

	if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_CERRSSMSH_BIT))
		return false;

	reg = ADF_CSR_RD(csr, ADF_GEN4_CERRSSMSH);
	reg &= ADF_GEN4_CERRSSMSH_ERROR_BIT;

	dev_warn(&GET_DEV(accel_dev),
		 "Correctable error on ssm shared memory: 0x%x\n",
		 reg);

	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);

	ADF_CSR_WR(csr, ADF_GEN4_CERRSSMSH, reg);

	return false;
}

static bool adf_handle_pperr_err(struct adf_accel_dev *accel_dev,
				 void __iomem *csr, u32 iastatssm)
{
	u32 reg;

	if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_PPERR_BIT))
		return false;

	reg = ADF_CSR_RD(csr, ADF_GEN4_PPERR);
	reg &= ADF_GEN4_PPERR_BITMASK;

	dev_err(&GET_DEV(accel_dev),
		"Uncorrectable error CPP transaction on memory target: 0x%x\n",
		reg);

	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

	ADF_CSR_WR(csr, ADF_GEN4_PPERR, reg);

	return false;
}

static void adf_poll_slicehang_csr(struct adf_accel_dev *accel_dev,
				   void __iomem *csr, u32 slice_hang_offset,
				   char *slice_name)
{
	u32 slice_hang_reg = ADF_CSR_RD(csr, slice_hang_offset);

	if (!slice_hang_reg)
		return;

	dev_err(&GET_DEV(accel_dev),
		"Slice %s hang error encountered\n", slice_name);

	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
}

static bool adf_handle_slice_hang_error(struct adf_accel_dev *accel_dev,
					void __iomem *csr, u32 iastatssm)
{
	struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);

	if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SLICEHANG_ERR_BIT))
		return false;

	adf_poll_slicehang_csr(accel_dev, csr,
			       ADF_GEN4_SLICEHANGSTATUS_ATH_CPH, "ath_cph");
	adf_poll_slicehang_csr(accel_dev, csr,
			       ADF_GEN4_SLICEHANGSTATUS_CPR_XLT, "cpr_xlt");
	adf_poll_slicehang_csr(accel_dev, csr,
			       ADF_GEN4_SLICEHANGSTATUS_DCPR_UCS, "dcpr_ucs");
	adf_poll_slicehang_csr(accel_dev, csr,
			       ADF_GEN4_SLICEHANGSTATUS_PKE, "pke");

	if (err_mask->parerr_wat_wcp_mask)
		adf_poll_slicehang_csr(accel_dev, csr,
				       ADF_GEN4_SLICEHANGSTATUS_WAT_WCP,
				       "wat_wcp");

	return false;
}

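/*
 * SPP parity errors come in four flavours: pull command, pull data,
 * push command and push data, each reported per slice group. Command
 * parity errors are treated as fatal and request a device reset; data
 * parity errors are counted as uncorrectable but do not.
 */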
static bool adf_handle_spp_pullcmd_err(struct adf_accel_dev *accel_dev,
				       void __iomem *csr)
{
	struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
	bool reset_required = false;
	u32 reg;

	reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_ATH_CPH);
	reg &= err_mask->parerr_ath_cph_mask;
	if (reg) {
		dev_err(&GET_DEV(accel_dev),
			"SPP pull command fatal error ATH_CPH: 0x%x\n", reg);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

		ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_ATH_CPH, reg);

		reset_required = true;
	}

	reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_CPR_XLT);
	reg &= err_mask->parerr_cpr_xlt_mask;
	if (reg) {
		dev_err(&GET_DEV(accel_dev),
			"SPP pull command fatal error CPR_XLT: 0x%x\n", reg);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

		ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_CPR_XLT, reg);

		reset_required = true;
	}

	reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_DCPR_UCS);
	reg &= err_mask->parerr_dcpr_ucs_mask;
	if (reg) {
		dev_err(&GET_DEV(accel_dev),
			"SPP pull command fatal error DCPR_UCS: 0x%x\n", reg);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

		ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_DCPR_UCS, reg);

		reset_required = true;
	}

	reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_PKE);
	reg &= err_mask->parerr_pke_mask;
	if (reg) {
		dev_err(&GET_DEV(accel_dev),
			"SPP pull command fatal error PKE: 0x%x\n", reg);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

		ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_PKE, reg);

		reset_required = true;
	}

	if (err_mask->parerr_wat_wcp_mask) {
		reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_WAT_WCP);
		reg &= err_mask->parerr_wat_wcp_mask;
		if (reg) {
			dev_err(&GET_DEV(accel_dev),
				"SPP pull command fatal error WAT_WCP: 0x%x\n", reg);

			ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

			ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_WAT_WCP, reg);

			reset_required = true;
		}
	}

	return reset_required;
}

static bool adf_handle_spp_pulldata_err(struct adf_accel_dev *accel_dev,
					void __iomem *csr)
{
	struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
	u32 reg;

	reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_ATH_CPH);
	reg &= err_mask->parerr_ath_cph_mask;
	if (reg) {
		dev_err(&GET_DEV(accel_dev),
			"SPP pull data err ATH_CPH: 0x%x\n", reg);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

		ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_ATH_CPH, reg);
	}

	reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_CPR_XLT);
	reg &= err_mask->parerr_cpr_xlt_mask;
	if (reg) {
		dev_err(&GET_DEV(accel_dev),
			"SPP pull data err CPR_XLT: 0x%x\n", reg);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

		ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_CPR_XLT, reg);
	}

	reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_DCPR_UCS);
	reg &= err_mask->parerr_dcpr_ucs_mask;
	if (reg) {
		dev_err(&GET_DEV(accel_dev),
			"SPP pull data err DCPR_UCS: 0x%x\n", reg);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

		ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_DCPR_UCS, reg);
	}

	reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_PKE);
	reg &= err_mask->parerr_pke_mask;
	if (reg) {
		dev_err(&GET_DEV(accel_dev),
			"SPP pull data err PKE: 0x%x\n", reg);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

		ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_PKE, reg);
	}

	if (err_mask->parerr_wat_wcp_mask) {
		reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_WAT_WCP);
		reg &= err_mask->parerr_wat_wcp_mask;
		if (reg) {
			dev_err(&GET_DEV(accel_dev),
				"SPP pull data err WAT_WCP: 0x%x\n", reg);

			ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

			ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_WAT_WCP, reg);
		}
	}

	return false;
}

static bool adf_handle_spp_pushcmd_err(struct adf_accel_dev *accel_dev,
				       void __iomem *csr)
{
	struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
	bool reset_required = false;
	u32 reg;

	reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_ATH_CPH);
	reg &= err_mask->parerr_ath_cph_mask;
	if (reg) {
		dev_err(&GET_DEV(accel_dev),
			"SPP push command fatal error ATH_CPH: 0x%x\n", reg);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

		ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_ATH_CPH, reg);

		reset_required = true;
	}

	reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_CPR_XLT);
	reg &= err_mask->parerr_cpr_xlt_mask;
	if (reg) {
		dev_err(&GET_DEV(accel_dev),
			"SPP push command fatal error CPR_XLT: 0x%x\n", reg);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

		ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_CPR_XLT, reg);

		reset_required = true;
	}

	reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_DCPR_UCS);
	reg &= err_mask->parerr_dcpr_ucs_mask;
	if (reg) {
		dev_err(&GET_DEV(accel_dev),
			"SPP push command fatal error DCPR_UCS: 0x%x\n", reg);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

		ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_DCPR_UCS, reg);

		reset_required = true;
	}

	reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_PKE);
	reg &= err_mask->parerr_pke_mask;
	if (reg) {
		dev_err(&GET_DEV(accel_dev),
			"SPP push command fatal error PKE: 0x%x\n", reg);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

		ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_PKE, reg);

		reset_required = true;
	}

	if (err_mask->parerr_wat_wcp_mask) {
		reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_WAT_WCP);
		reg &= err_mask->parerr_wat_wcp_mask;
		if (reg) {
			dev_err(&GET_DEV(accel_dev),
				"SPP push command fatal error WAT_WCP: 0x%x\n", reg);

			ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

			ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_WAT_WCP, reg);

			reset_required = true;
		}
	}

	return reset_required;
}

static bool adf_handle_spp_pushdata_err(struct adf_accel_dev *accel_dev,
					void __iomem *csr)
{
	struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
	u32 reg;

	reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_ATH_CPH);
	reg &= err_mask->parerr_ath_cph_mask;
	if (reg) {
		dev_err(&GET_DEV(accel_dev),
			"SPP push data err ATH_CPH: 0x%x\n", reg);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

		ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_ATH_CPH, reg);
	}

	reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_CPR_XLT);
	reg &= err_mask->parerr_cpr_xlt_mask;
	if (reg) {
		dev_err(&GET_DEV(accel_dev),
			"SPP push data err CPR_XLT: 0x%x\n", reg);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

		ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_CPR_XLT, reg);
	}

	reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_DCPR_UCS);
	reg &= err_mask->parerr_dcpr_ucs_mask;
	if (reg) {
		dev_err(&GET_DEV(accel_dev),
			"SPP push data err DCPR_UCS: 0x%x\n", reg);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

		ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_DCPR_UCS, reg);
	}

	reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_PKE);
	reg &= err_mask->parerr_pke_mask;
	if (reg) {
		dev_err(&GET_DEV(accel_dev),
			"SPP push data err PKE: 0x%x\n", reg);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

		ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_PKE, reg);
	}

	if (err_mask->parerr_wat_wcp_mask) {
		reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_WAT_WCP);
		reg &= err_mask->parerr_wat_wcp_mask;
		if (reg) {
			dev_err(&GET_DEV(accel_dev),
				"SPP push data err WAT_WCP: 0x%x\n", reg);

			ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

			ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_WAT_WCP,
				   reg);
		}
	}

	return false;
}

static bool adf_handle_spppar_err(struct adf_accel_dev *accel_dev,
				  void __iomem *csr, u32 iastatssm)
{
	bool reset_required;

	if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SPPPARERR_BIT))
		return false;

	reset_required = adf_handle_spp_pullcmd_err(accel_dev, csr);
	reset_required |= adf_handle_spp_pulldata_err(accel_dev, csr);
	reset_required |= adf_handle_spp_pushcmd_err(accel_dev, csr);
	reset_required |= adf_handle_spp_pushdata_err(accel_dev, csr);

	return reset_required;
}

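/*
 * Some SSM status registers pack several independent error bits, so the
 * handlers below walk the set bits with for_each_set_bit() and bump the
 * RAS counter once per bit; the counters thus reflect the number of
 * errors, not the number of interrupts.
 */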
static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev,
				     void __iomem *csr, u32 iastatssm)
{
	u32 reg, bits_num = BITS_PER_REG(reg);
	bool reset_required = false;
	unsigned long errs_bits;
	u32 bit_iterator;

	if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMCPPERR_BIT))
		return false;

	reg = ADF_CSR_RD(csr, ADF_GEN4_SSMCPPERR);
	reg &= ADF_GEN4_SSMCPPERR_FATAL_BITMASK | ADF_GEN4_SSMCPPERR_UNCERR_BITMASK;
	if (reg & ADF_GEN4_SSMCPPERR_FATAL_BITMASK) {
		dev_err(&GET_DEV(accel_dev),
			"Fatal SSM CPP parity error: 0x%x\n", reg);

		errs_bits = reg & ADF_GEN4_SSMCPPERR_FATAL_BITMASK;
		for_each_set_bit(bit_iterator, &errs_bits, bits_num) {
			ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
		}
		reset_required = true;
	}

	if (reg & ADF_GEN4_SSMCPPERR_UNCERR_BITMASK) {
		dev_err(&GET_DEV(accel_dev),
			"non-Fatal SSM CPP parity error: 0x%x\n", reg);
		errs_bits = reg & ADF_GEN4_SSMCPPERR_UNCERR_BITMASK;

		for_each_set_bit(bit_iterator, &errs_bits, bits_num) {
			ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
		}
	}

	ADF_CSR_WR(csr, ADF_GEN4_SSMCPPERR, reg);

	return reset_required;
}

static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev,
				   void __iomem *csr, u32 iastatssm)
{
	struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
	u32 reg;

	if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT))
		return false;

	reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC);
	reg &= ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT;
	if (reg) {
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
		ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC, reg);
	}

	reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH);
	reg &= err_mask->parerr_ath_cph_mask;
	if (reg) {
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
		ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH, reg);
	}

	reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT);
	reg &= err_mask->parerr_cpr_xlt_mask;
	if (reg) {
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
		ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT, reg);
	}

	reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS);
	reg &= err_mask->parerr_dcpr_ucs_mask;
	if (reg) {
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
		ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS, reg);
	}

	reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE);
	reg &= err_mask->parerr_pke_mask;
	if (reg) {
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
		ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE, reg);
	}

	if (err_mask->parerr_wat_wcp_mask) {
		reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP);
		reg &= err_mask->parerr_wat_wcp_mask;
		if (reg) {
			ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
			ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP,
				   reg);
		}
	}

	dev_err(&GET_DEV(accel_dev), "Slice ssm soft parity error reported\n");

	return false;
}

static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev,
				     void __iomem *csr, u32 iastatssm)
{
	u32 reg, bits_num = BITS_PER_REG(reg);
	bool reset_required = false;
	unsigned long errs_bits;
	u32 bit_iterator;

	if (!(iastatssm & (ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_CERR_BIT |
			   ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_UNCERR_BIT)))
		return false;

	reg = ADF_CSR_RD(csr, ADF_GEN4_SER_ERR_SSMSH);
	reg &= ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK |
	       ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK |
	       ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK;
	if (reg & ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK) {
		dev_err(&GET_DEV(accel_dev),
			"Fatal SER_SSMSH_ERR: 0x%x\n", reg);

		errs_bits = reg & ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK;
		for_each_set_bit(bit_iterator, &errs_bits, bits_num) {
			ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
		}

		reset_required = true;
	}

	if (reg & ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK) {
		dev_err(&GET_DEV(accel_dev),
			"non-fatal SER_SSMSH_ERR: 0x%x\n", reg);

		errs_bits = reg & ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK;
		for_each_set_bit(bit_iterator, &errs_bits, bits_num) {
			ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
		}
	}

	if (reg & ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK) {
		dev_warn(&GET_DEV(accel_dev),
			 "Correctable SER_SSMSH_ERR: 0x%x\n", reg);

		errs_bits = reg & ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK;
		for_each_set_bit(bit_iterator, &errs_bits, bits_num) {
			ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
		}
	}

	ADF_CSR_WR(csr, ADF_GEN4_SER_ERR_SSMSH, reg);

	return reset_required;
}

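/*
 * IAINTSTATSSM aggregates all SSM interrupt sources. It is cleared once
 * at the end, after every sub-handler has read and cleared its own
 * status register.
 */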
static bool adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev,
				    void __iomem *csr)
{
	u32 iastatssm = ADF_CSR_RD(csr, ADF_GEN4_IAINTSTATSSM);
	bool reset_required;

	iastatssm &= ADF_GEN4_IAINTSTATSSM_BITMASK;
	if (!iastatssm)
		return false;

	reset_required = adf_handle_uerrssmsh(accel_dev, csr, iastatssm);
	reset_required |= adf_handle_cerrssmsh(accel_dev, csr, iastatssm);
	reset_required |= adf_handle_pperr_err(accel_dev, csr, iastatssm);
	reset_required |= adf_handle_slice_hang_error(accel_dev, csr, iastatssm);
	reset_required |= adf_handle_spppar_err(accel_dev, csr, iastatssm);
	reset_required |= adf_handle_ssmcpppar_err(accel_dev, csr, iastatssm);
	reset_required |= adf_handle_rf_parr_err(accel_dev, csr, iastatssm);
	reset_required |= adf_handle_ser_err_ssmsh(accel_dev, csr, iastatssm);

	ADF_CSR_WR(csr, ADF_GEN4_IAINTSTATSSM, iastatssm);

	return reset_required;
}

static bool adf_handle_exprpssmcmpr(struct adf_accel_dev *accel_dev,
				    void __iomem *csr)
{
	u32 reg = ADF_CSR_RD(csr, ADF_GEN4_EXPRPSSMCPR);

	reg &= ADF_GEN4_EXPRPSSMCPR_UNCERR_BITMASK;
	if (!reg)
		return false;

	dev_err(&GET_DEV(accel_dev),
		"Uncorrectable error exception in SSM CMP: 0x%x\n", reg);

	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

	ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMCPR, reg);

	return false;
}

static bool adf_handle_exprpssmxlt(struct adf_accel_dev *accel_dev,
				   void __iomem *csr)
{
	u32 reg = ADF_CSR_RD(csr, ADF_GEN4_EXPRPSSMXLT);

	reg &= ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK |
	       ADF_GEN4_EXPRPSSMXLT_CERR_BIT;
	if (!reg)
		return false;

	if (reg & ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK) {
		dev_err(&GET_DEV(accel_dev),
			"Uncorrectable error exception in SSM XLT: 0x%x\n", reg);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
	}

	if (reg & ADF_GEN4_EXPRPSSMXLT_CERR_BIT) {
		dev_warn(&GET_DEV(accel_dev),
			 "Correctable error exception in SSM XLT: 0x%x\n", reg);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
	}

	ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMXLT, reg);

	return false;
}

static bool adf_handle_exprpssmdcpr(struct adf_accel_dev *accel_dev,
				    void __iomem *csr)
{
	u32 reg;
	int i;

	for (i = 0; i < ADF_GEN4_DCPR_SLICES_NUM; i++) {
		reg = ADF_CSR_RD(csr, ADF_GEN4_EXPRPSSMDCPR(i));
		reg &= ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK |
		       ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK;
		if (!reg)
			continue;

		if (reg & ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK) {
			dev_err(&GET_DEV(accel_dev),
				"Uncorrectable error exception in SSM DCMP: 0x%x\n", reg);

			ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
		}

		if (reg & ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK) {
			dev_warn(&GET_DEV(accel_dev),
				 "Correctable error exception in SSM DCMP: 0x%x\n", reg);

			ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
		}

		ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMDCPR(i), reg);
	}

	return false;
}

static bool adf_handle_ssm(struct adf_accel_dev *accel_dev, void __iomem *csr,
			   u32 errsou)
{
	bool reset_required;

	if (!(errsou & ADF_GEN4_ERRSOU2_SSM_ERR_BIT))
		return false;

	reset_required = adf_handle_iaintstatssm(accel_dev, csr);
	reset_required |= adf_handle_exprpssmcmpr(accel_dev, csr);
	reset_required |= adf_handle_exprpssmxlt(accel_dev, csr);
	reset_required |= adf_handle_exprpssmdcpr(accel_dev, csr);

	return reset_required;
}

static bool adf_handle_cpp_cfc_err(struct adf_accel_dev *accel_dev,
				   void __iomem *csr, u32 errsou)
{
	bool reset_required = false;
	u32 reg;

	if (!(errsou & ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT))
		return false;

	reg = ADF_CSR_RD(csr, ADF_GEN4_CPP_CFC_ERR_STATUS);
	if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_DATAPAR_BIT) {
		dev_err(&GET_DEV(accel_dev),
			"CPP_CFC_ERR: data parity: 0x%x\n", reg);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
	}

	if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_CMDPAR_BIT) {
		dev_err(&GET_DEV(accel_dev),
			"CPP_CFC_ERR: command parity: 0x%x\n", reg);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

		reset_required = true;
	}

	if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_MERR_BIT) {
		dev_err(&GET_DEV(accel_dev),
			"CPP_CFC_ERR: multiple errors: 0x%x\n", reg);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

		reset_required = true;
	}

	ADF_CSR_WR(csr, ADF_GEN4_CPP_CFC_ERR_STATUS_CLR,
		   ADF_GEN4_CPP_CFC_ERR_STATUS_CLR_BITMASK);

	return reset_required;
}

static void adf_gen4_process_errsou2(struct adf_accel_dev *accel_dev,
				     void __iomem *csr, u32 errsou,
				     bool *reset_required)
{
	*reset_required |= adf_handle_ssm(accel_dev, csr, errsou);
	*reset_required |= adf_handle_cpp_cfc_err(accel_dev, csr, errsou);
}

static bool adf_handle_timiscsts(struct adf_accel_dev *accel_dev,
				 void __iomem *csr, u32 errsou)
{
	u32 timiscsts;

	if (!(errsou & ADF_GEN4_ERRSOU3_TIMISCSTS_BIT))
		return false;

	timiscsts = ADF_CSR_RD(csr, ADF_GEN4_TIMISCSTS);

	dev_err(&GET_DEV(accel_dev),
		"Fatal error in Transmit Interface: 0x%x\n", timiscsts);

	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

	return true;
}

static bool adf_handle_ricppintsts(struct adf_accel_dev *accel_dev,
				   void __iomem *csr, u32 errsou)
{
	u32 ricppintsts;

	if (!(errsou & ADF_GEN4_ERRSOU3_RICPPINTSTS_BITMASK))
		return false;

	ricppintsts = ADF_CSR_RD(csr, ADF_GEN4_RICPPINTSTS);
	ricppintsts &= ADF_GEN4_RICPPINTSTS_BITMASK;

	dev_err(&GET_DEV(accel_dev),
		"RI CPP Uncorrectable Error: 0x%x\n", ricppintsts);

	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

	ADF_CSR_WR(csr, ADF_GEN4_RICPPINTSTS, ricppintsts);

	return false;
}

static bool adf_handle_ticppintsts(struct adf_accel_dev *accel_dev,
				   void __iomem *csr, u32 errsou)
{
	u32 ticppintsts;

	if (!(errsou & ADF_GEN4_ERRSOU3_TICPPINTSTS_BITMASK))
		return false;

	ticppintsts = ADF_CSR_RD(csr, ADF_GEN4_TICPPINTSTS);
	ticppintsts &= ADF_GEN4_TICPPINTSTS_BITMASK;

	dev_err(&GET_DEV(accel_dev),
		"TI CPP Uncorrectable Error: 0x%x\n", ticppintsts);

	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

	ADF_CSR_WR(csr, ADF_GEN4_TICPPINTSTS, ticppintsts);

	return false;
}

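/*
 * The ARAM error registers below mix status and reporting-enable bits
 * in one register, which is why the handlers OR the *_EN_BITMASK back
 * in before the write that clears the status: clearing the error must
 * not switch off further reporting.
 */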
static bool adf_handle_aramcerr(struct adf_accel_dev *accel_dev,
				void __iomem *csr, u32 errsou)
{
	u32 aram_cerr;

	if (!(errsou & ADF_GEN4_ERRSOU3_REG_ARAMCERR_BIT))
		return false;

	aram_cerr = ADF_CSR_RD(csr, ADF_GEN4_REG_ARAMCERR);
	aram_cerr &= ADF_GEN4_REG_ARAMCERR_BIT;

	dev_warn(&GET_DEV(accel_dev),
		 "ARAM correctable error: 0x%x\n", aram_cerr);

	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);

	aram_cerr |= ADF_GEN4_REG_ARAMCERR_EN_BITMASK;

	ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR, aram_cerr);

	return false;
}

static bool adf_handle_aramuerr(struct adf_accel_dev *accel_dev,
				void __iomem *csr, u32 errsou)
{
	bool reset_required = false;
	u32 aramuerr;

	if (!(errsou & ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT))
		return false;

	aramuerr = ADF_CSR_RD(csr, ADF_GEN4_REG_ARAMUERR);
	aramuerr &= ADF_GEN4_REG_ARAMUERR_ERROR_BIT |
		    ADF_GEN4_REG_ARAMUERR_MULTI_ERRORS_BIT;

	if (!aramuerr)
		return false;

	if (aramuerr & ADF_GEN4_REG_ARAMUERR_MULTI_ERRORS_BIT) {
		dev_err(&GET_DEV(accel_dev),
			"ARAM multiple uncorrectable errors: 0x%x\n", aramuerr);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

		reset_required = true;
	} else {
		dev_err(&GET_DEV(accel_dev),
			"ARAM uncorrectable error: 0x%x\n", aramuerr);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
	}

	aramuerr |= ADF_GEN4_REG_ARAMUERR_EN_BITMASK;

	ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMUERR, aramuerr);

	return reset_required;
}

static bool adf_handle_reg_cppmemtgterr(struct adf_accel_dev *accel_dev,
					void __iomem *csr, u32 errsou)
{
	bool reset_required = false;
	u32 cppmemtgterr;

	if (!(errsou & ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT))
		return false;

	cppmemtgterr = ADF_CSR_RD(csr, ADF_GEN4_REG_CPPMEMTGTERR);
	cppmemtgterr &= ADF_GEN4_REG_CPPMEMTGTERR_BITMASK |
			ADF_GEN4_REG_CPPMEMTGTERR_MULTI_ERRORS_BIT;
	if (!cppmemtgterr)
		return false;

	if (cppmemtgterr & ADF_GEN4_REG_CPPMEMTGTERR_MULTI_ERRORS_BIT) {
		dev_err(&GET_DEV(accel_dev),
			"Misc memory target multiple uncorrectable errors: 0x%x\n",
			cppmemtgterr);

		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);

		reset_required = true;
	} else {
		dev_err(&GET_DEV(accel_dev),
			"Misc memory target uncorrectable error: 0x%x\n", cppmemtgterr);
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
	}

	cppmemtgterr |= ADF_GEN4_REG_CPPMEMTGTERR_EN_BITMASK;

	ADF_CSR_WR(csr, ADF_GEN4_REG_CPPMEMTGTERR, cppmemtgterr);

	return reset_required;
}

static bool adf_handle_atufaultstatus(struct adf_accel_dev *accel_dev,
				      void __iomem *csr, u32 errsou)
{
	u32 max_rp_num = GET_HW_DATA(accel_dev)->num_banks;
	u32 i;

	if (!(errsou & ADF_GEN4_ERRSOU3_ATUFAULTSTATUS_BIT))
		return false;

	for (i = 0; i < max_rp_num; i++) {
		u32 atufaultstatus = ADF_CSR_RD(csr, ADF_GEN4_ATUFAULTSTATUS(i));

		atufaultstatus &= ADF_GEN4_ATUFAULTSTATUS_BIT;

		if (atufaultstatus) {
			dev_err(&GET_DEV(accel_dev),
				"Ring Pair (%u) ATU detected fault: 0x%x\n", i,
				atufaultstatus);

			ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);

			ADF_CSR_WR(csr, ADF_GEN4_ATUFAULTSTATUS(i), atufaultstatus);
		}
	}

	return false;
}

static void adf_gen4_process_errsou3(struct adf_accel_dev *accel_dev,
				     void __iomem *csr, void __iomem *aram_csr,
				     u32 errsou, bool *reset_required)
{
	*reset_required |= adf_handle_timiscsts(accel_dev, csr, errsou);
	*reset_required |= adf_handle_ricppintsts(accel_dev, csr, errsou);
	*reset_required |= adf_handle_ticppintsts(accel_dev, csr, errsou);
	*reset_required |= adf_handle_aramcerr(accel_dev, aram_csr, errsou);
	*reset_required |= adf_handle_aramuerr(accel_dev, aram_csr, errsou);
	*reset_required |= adf_handle_reg_cppmemtgterr(accel_dev, aram_csr, errsou);
	*reset_required |= adf_handle_atufaultstatus(accel_dev, csr, errsou);
}

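/*
 * Top-level RAS interrupt handler: read each ERRSOU register in turn
 * and dispatch to its processing routine. Returns true if any error
 * source was handled; *reset_required is set when at least one handler
 * deemed the error fatal.
 */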
static bool adf_gen4_handle_interrupt(struct adf_accel_dev *accel_dev,
				      bool *reset_required)
{
	void __iomem *aram_csr = adf_get_aram_base(accel_dev);
	void __iomem *csr = adf_get_pmisc_base(accel_dev);
	u32 errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU0);
	bool handled = false;

	*reset_required = false;

	if (errsou & ADF_GEN4_ERRSOU0_BIT) {
		adf_gen4_process_errsou0(accel_dev, csr);
		handled = true;
	}

	errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU1);
	if (errsou & ADF_GEN4_ERRSOU1_BITMASK) {
		adf_gen4_process_errsou1(accel_dev, csr, errsou, reset_required);
		handled = true;
	}

	errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU2);
	if (errsou & ADF_GEN4_ERRSOU2_BITMASK) {
		adf_gen4_process_errsou2(accel_dev, csr, errsou, reset_required);
		handled = true;
	}

	errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU3);
	if (errsou & ADF_GEN4_ERRSOU3_BITMASK) {
		adf_gen4_process_errsou3(accel_dev, csr, aram_csr, errsou, reset_required);
		handled = true;
	}

	return handled;
}

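/*
 * Wire the GEN4 implementations into a device's RAS ops. A minimal
 * usage sketch, assuming a GEN4 device init routine with a struct
 * adf_hw_device_data *hw_data in scope (as done by the upstream 4xxx
 * driver):
 *
 *	adf_gen4_init_ras_ops(&hw_data->ras_ops);
 *
 * after which the common interrupt path can call
 * hw_data->ras_ops.handle_interrupt(accel_dev, &reset_required).
 */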
void adf_gen4_init_ras_ops(struct adf_ras_ops *ras_ops)
{
	ras_ops->enable_ras_errors = adf_gen4_enable_ras;
	ras_ops->disable_ras_errors = adf_gen4_disable_ras;
	ras_ops->handle_interrupt = adf_gen4_handle_interrupt;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_ras_ops);