// SPDX-License-Identifier: MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include <linux/bitmap.h>
#include <linux/fault-inject.h>

#include "regs/xe_gsc_regs.h"
#include "regs/xe_hw_error_regs.h"
#include "regs/xe_irq_regs.h"

#include "xe_device.h"
#include "xe_drm_ras.h"
#include "xe_hw_error.h"
#include "xe_mmio.h"
#include "xe_survivability_mode.h"

#define GT_HW_ERROR_MAX_ERR_BITS		16
#define HEC_UNCORR_FW_ERR_BITS			4
#define XE_RAS_REG_SIZE				32
#define XE_SOC_NUM_IEH				2

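/* Test whether a GT error bit is set in the relevant PVC error mask */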
#define PVC_ERROR_MASK_SET(hw_err, err_bit)	((hw_err == HARDWARE_ERROR_CORRECTABLE) ? \
						 (PVC_COR_ERR_MASK & REG_BIT(err_bit)) : \
						 (PVC_FAT_ERR_MASK & REG_BIT(err_bit)))

extern struct fault_attr inject_csc_hw_error;

static const char * const error_severity[] = DRM_XE_RAS_ERROR_SEVERITY_NAMES;

static const char * const hec_uncorrected_fw_errors[] = {
	"Fatal",
	"CSE Disabled",
	"FD Corruption",
	"Data Corruption"
};

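/* Map DEV_ERR_STAT bit positions to DRM RAS error component IDs */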
static const unsigned long xe_hw_error_map[] = {
	[XE_GT_ERROR]	= DRM_XE_RAS_ERR_COMP_CORE_COMPUTE,
	[XE_SOC_ERROR]	= DRM_XE_RAS_ERR_COMP_SOC_INTERNAL,
};

enum gt_vector_regs {
	ERR_STAT_GT_VECTOR0 = 0,
	ERR_STAT_GT_VECTOR1,
	ERR_STAT_GT_VECTOR2,
	ERR_STAT_GT_VECTOR3,
	ERR_STAT_GT_VECTOR4,
	ERR_STAT_GT_VECTOR5,
	ERR_STAT_GT_VECTOR6,
	ERR_STAT_GT_VECTOR7,
	ERR_STAT_GT_VECTOR_MAX
};

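/*
 * Number of valid ERR_STAT_GT_VECTOR registers: four for correctable
 * errors, all eight otherwise.
 */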
#define PVC_GT_VECTOR_LEN(hw_err)	((hw_err == HARDWARE_ERROR_CORRECTABLE) ? \
					 ERR_STAT_GT_VECTOR4 : ERR_STAT_GT_VECTOR_MAX)

static enum drm_xe_ras_error_severity hw_err_to_severity(const enum hardware_error hw_err)
{
	if (hw_err == HARDWARE_ERROR_CORRECTABLE)
		return DRM_XE_RAS_ERR_SEV_CORRECTABLE;

	/* Uncorrectable errors comprise both fatal and non-fatal errors */
	return DRM_XE_RAS_ERR_SEV_UNCORRECTABLE;
}

static const char * const pvc_master_global_err_reg[] = {
	[0 ... 1]	= "Undefined",
	[2]		= "HBM SS0: Channel0",
	[3]		= "HBM SS0: Channel1",
	[4]		= "HBM SS0: Channel2",
	[5]		= "HBM SS0: Channel3",
	[6]		= "HBM SS0: Channel4",
	[7]		= "HBM SS0: Channel5",
	[8]		= "HBM SS0: Channel6",
	[9]		= "HBM SS0: Channel7",
	[10]		= "HBM SS1: Channel0",
	[11]		= "HBM SS1: Channel1",
	[12]		= "HBM SS1: Channel2",
	[13]		= "HBM SS1: Channel3",
	[14]		= "HBM SS1: Channel4",
	[15]		= "HBM SS1: Channel5",
	[16]		= "HBM SS1: Channel6",
	[17]		= "HBM SS1: Channel7",
	[18 ... 31]	= "Undefined",
};
static_assert(ARRAY_SIZE(pvc_master_global_err_reg) == XE_RAS_REG_SIZE);

static const char * const pvc_slave_global_err_reg[] = {
	[0]		= "Undefined",
	[1]		= "HBM SS2: Channel0",
	[2]		= "HBM SS2: Channel1",
	[3]		= "HBM SS2: Channel2",
	[4]		= "HBM SS2: Channel3",
	[5]		= "HBM SS2: Channel4",
	[6]		= "HBM SS2: Channel5",
	[7]		= "HBM SS2: Channel6",
	[8]		= "HBM SS2: Channel7",
	[9]		= "HBM SS3: Channel0",
	[10]		= "HBM SS3: Channel1",
	[11]		= "HBM SS3: Channel2",
	[12]		= "HBM SS3: Channel3",
	[13]		= "HBM SS3: Channel4",
	[14]		= "HBM SS3: Channel5",
	[15]		= "HBM SS3: Channel6",
	[16]		= "HBM SS3: Channel7",
	[17]		= "Undefined",
	[18]		= "ANR MDFI",
	[19 ... 31]	= "Undefined",
};
static_assert(ARRAY_SIZE(pvc_slave_global_err_reg) == XE_RAS_REG_SIZE);

static const char * const pvc_slave_local_fatal_err_reg[] = {
	[0]		= "Local IEH: Malformed PCIe AER",
	[1]		= "Local IEH: Malformed PCIe ERR",
	[2]		= "Local IEH: UR conditions in IEH",
	[3]		= "Local IEH: From SERR Sources",
	[4 ... 19]	= "Undefined",
	[20]		= "Malformed MCA error packet (HBM/Punit)",
	[21 ... 31]	= "Undefined",
};
static_assert(ARRAY_SIZE(pvc_slave_local_fatal_err_reg) == XE_RAS_REG_SIZE);

static const char * const pvc_master_local_fatal_err_reg[] = {
	[0]		= "Local IEH: Malformed IOSF PCIe AER",
	[1]		= "Local IEH: Malformed IOSF PCIe ERR",
	[2]		= "Local IEH: UR RESPONSE",
	[3]		= "Local IEH: From SERR SPI controller",
	[4]		= "Base Die MDFI T2T",
	[5]		= "Undefined",
	[6]		= "Base Die MDFI T2C",
	[7]		= "Undefined",
	[8]		= "Invalid CSC PSF Command Parity",
	[9]		= "Invalid CSC PSF Unexpected Completion",
	[10]		= "Invalid CSC PSF Unsupported Request",
	[11]		= "Invalid PCIe PSF Command Parity",
	[12]		= "PCIe PSF Unexpected Completion",
	[13]		= "PCIe PSF Unsupported Request",
	[14 ... 19]	= "Undefined",
	[20]		= "Malformed MCA error packet (HBM/Punit)",
	[21 ... 31]	= "Undefined",
};
static_assert(ARRAY_SIZE(pvc_master_local_fatal_err_reg) == XE_RAS_REG_SIZE);

static const char * const pvc_master_local_nonfatal_err_reg[] = {
	[0 ... 3]	= "Undefined",
	[4]		= "Base Die MDFI T2T",
	[5]		= "Undefined",
	[6]		= "Base Die MDFI T2C",
	[7]		= "Undefined",
	[8]		= "Invalid CSC PSF Command Parity",
	[9]		= "Invalid CSC PSF Unexpected Completion",
	[10]		= "Invalid PCIe PSF Command Parity",
	[11 ... 31]	= "Undefined",
};
static_assert(ARRAY_SIZE(pvc_master_local_nonfatal_err_reg) == XE_RAS_REG_SIZE);

#define PVC_MASTER_LOCAL_REG_INFO(hw_err)	((hw_err == HARDWARE_ERROR_FATAL) ? \
						 pvc_master_local_fatal_err_reg : \
						 pvc_master_local_nonfatal_err_reg)

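/* Fault-injection hook (debugfs) used to simulate a CSC firmware error */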
static bool fault_inject_csc_hw_error(void)
{
	return IS_ENABLED(CONFIG_DEBUG_FS) && should_fail(&inject_csc_hw_error, 1);
}

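/* Worker to enable runtime survivability mode after an unrecoverable CSC error */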
static void csc_hw_error_work(struct work_struct *work)
{
	struct xe_tile *tile = container_of(work, typeof(*tile), csc_hw_error_work);
	struct xe_device *xe = tile_to_xe(tile);
	int ret;

	ret = xe_survivability_mode_runtime_enable(xe);
	if (ret)
		drm_err(&xe->drm, "Failed to enable runtime survivability mode\n");
}

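/*
 * Handle CSC firmware errors reported through the HEC uncorrectable error
 * registers. Only implemented for Battlemage; each firmware-reported error
 * schedules the survivability-mode worker.
 */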
static void csc_hw_error_handler(struct xe_tile *tile, const enum hardware_error hw_err)
{
	const enum drm_xe_ras_error_severity severity = hw_err_to_severity(hw_err);
	const char *severity_str = error_severity[severity];
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_mmio *mmio = &tile->mmio;
	u32 base, err_bit, err_src;
	unsigned long fw_err;

	if (xe->info.platform != XE_BATTLEMAGE)
		return;

	base = BMG_GSC_HECI1_BASE;
	lockdep_assert_held(&xe->irq.lock);
	err_src = xe_mmio_read32(mmio, HEC_UNCORR_ERR_STATUS(base));
	if (!err_src) {
		drm_err_ratelimited(&xe->drm, HW_ERR "Tile%d reported %s HEC_ERR_STATUS register blank\n",
				    tile->id, severity_str);
		return;
	}

	if (err_src & UNCORR_FW_REPORTED_ERR) {
		fw_err = xe_mmio_read32(mmio, HEC_UNCORR_FW_ERR_DW0(base));
		for_each_set_bit(err_bit, &fw_err, HEC_UNCORR_FW_ERR_BITS) {
			drm_err_ratelimited(&xe->drm, HW_ERR
					    "HEC FW %s %s reported, bit[%d] is set\n",
					     hec_uncorrected_fw_errors[err_bit], severity_str,
					     err_bit);

			schedule_work(&tile->csc_hw_error_work);
		}
	}

	xe_mmio_write32(mmio, HEC_UNCORR_ERR_STATUS(base), err_src);
}

static void log_hw_error(struct xe_tile *tile, const char *name,
			 const enum drm_xe_ras_error_severity severity)
{
	const char *severity_str = error_severity[severity];
	struct xe_device *xe = tile_to_xe(tile);

	if (severity == DRM_XE_RAS_ERR_SEV_CORRECTABLE)
		drm_warn(&xe->drm, "%s %s detected\n", name, severity_str);
	else
		drm_err_ratelimited(&xe->drm, "%s %s detected\n", name, severity_str);
}

static void log_gt_err(struct xe_tile *tile, const char *name, int i, u32 err,
		       const enum drm_xe_ras_error_severity severity)
{
	const char *severity_str = error_severity[severity];
	struct xe_device *xe = tile_to_xe(tile);

	if (severity == DRM_XE_RAS_ERR_SEV_CORRECTABLE)
		drm_warn(&xe->drm, "%s %s detected, ERROR_STAT_GT_VECTOR%d:0x%08x\n",
			 name, severity_str, i, err);
	else
		drm_err_ratelimited(&xe->drm, "%s %s detected, ERROR_STAT_GT_VECTOR%d:0x%08x\n",
				    name, severity_str, i, err);
}

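/*
 * Log a decoded SOC error and bump the matching RAS counter; bits named
 * "Undefined" in the lookup table are ignored.
 */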
static void log_soc_error(struct xe_tile *tile, const char * const *reg_info,
			  const enum drm_xe_ras_error_severity severity, u32 err_bit, u32 index)
{
	const char *severity_str = error_severity[severity];
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_drm_ras *ras = &xe->ras;
	struct xe_drm_ras_counter *info = ras->info[severity];
	const char *name;

	name = reg_info[err_bit];

	if (strcmp(name, "Undefined")) {
		if (severity == DRM_XE_RAS_ERR_SEV_CORRECTABLE)
			drm_warn(&xe->drm, "%s SOC %s detected", name, severity_str);
		else
			drm_err_ratelimited(&xe->drm, "%s SOC %s detected", name, severity_str);
		atomic_inc(&info[index].counter);
	}
}

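/*
 * Decode PVC GT errors: walk the ERR_STAT_GT_VECTOR registers, attribute
 * each set bit to a GT sub-unit, update the RAS counters and clear the
 * registers. Non-fatal errors are only counted and logged.
 */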
static void gt_hw_error_handler(struct xe_tile *tile, const enum hardware_error hw_err,
				u32 error_id)
{
	const enum drm_xe_ras_error_severity severity = hw_err_to_severity(hw_err);
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_drm_ras *ras = &xe->ras;
	struct xe_drm_ras_counter *info = ras->info[severity];
	struct xe_mmio *mmio = &tile->mmio;
	unsigned long err_stat = 0;
	int i;

	if (xe->info.platform != XE_PVC)
		return;

	if (hw_err == HARDWARE_ERROR_NONFATAL) {
		atomic_inc(&info[error_id].counter);
		log_hw_error(tile, info[error_id].name, severity);
		return;
	}

	for (i = 0; i < PVC_GT_VECTOR_LEN(hw_err); i++) {
		u32 vector, val;

		vector = xe_mmio_read32(mmio, ERR_STAT_GT_VECTOR_REG(hw_err, i));
		if (!vector)
			continue;

		switch (i) {
		case ERR_STAT_GT_VECTOR0:
		case ERR_STAT_GT_VECTOR1: {
			u32 errbit;

			val = hweight32(vector);
			atomic_add(val, &info[error_id].counter);
			log_gt_err(tile, "Subslice", i, vector, severity);

			/*
			 * Error status register is only populated once per error.
			 * Read the register and clear once.
			 */
			if (err_stat)
				break;

			err_stat = xe_mmio_read32(mmio, ERR_STAT_GT_REG(hw_err));
			for_each_set_bit(errbit, &err_stat, GT_HW_ERROR_MAX_ERR_BITS) {
				if (PVC_ERROR_MASK_SET(hw_err, errbit))
					atomic_inc(&info[error_id].counter);
			}
			if (err_stat)
				xe_mmio_write32(mmio, ERR_STAT_GT_REG(hw_err), err_stat);
			break;
		}
		case ERR_STAT_GT_VECTOR2:
		case ERR_STAT_GT_VECTOR3:
			val = hweight32(vector);
			atomic_add(val, &info[error_id].counter);
			log_gt_err(tile, "L3 BANK", i, vector, severity);
			break;
		case ERR_STAT_GT_VECTOR6:
			val = hweight32(vector);
			atomic_add(val, &info[error_id].counter);
			log_gt_err(tile, "TLB", i, vector, severity);
			break;
		case ERR_STAT_GT_VECTOR7:
			val = hweight32(vector);
			atomic_add(val, &info[error_id].counter);
			log_gt_err(tile, "L3 Fabric", i, vector, severity);
			break;
		default:
			log_gt_err(tile, "Undefined", i, vector, severity);
		}

		xe_mmio_write32(mmio, ERR_STAT_GT_VECTOR_REG(hw_err, i), vector);
	}
}

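/*
 * Process the slave IEH: decode its local error status (fatal errors only)
 * and its global error status, clearing both registers afterwards.
 */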
static void soc_slave_ieh_handler(struct xe_tile *tile, const enum hardware_error hw_err, u32 error_id)
{
	const enum drm_xe_ras_error_severity severity = hw_err_to_severity(hw_err);
	unsigned long slave_global_errstat, slave_local_errstat;
	struct xe_mmio *mmio = &tile->mmio;
	u32 regbit, slave;

	slave = SOC_PVC_SLAVE_BASE;
	slave_global_errstat = xe_mmio_read32(mmio, SOC_GLOBAL_ERR_STAT_REG(slave, hw_err));

	if (slave_global_errstat & SOC_IEH1_LOCAL_ERR_STATUS) {
		slave_local_errstat = xe_mmio_read32(mmio, SOC_LOCAL_ERR_STAT_REG(slave, hw_err));

		if (hw_err == HARDWARE_ERROR_FATAL) {
			for_each_set_bit(regbit, &slave_local_errstat, XE_RAS_REG_SIZE)
				log_soc_error(tile, pvc_slave_local_fatal_err_reg, severity,
					      regbit, error_id);
		}

		xe_mmio_write32(mmio, SOC_LOCAL_ERR_STAT_REG(slave, hw_err),
				slave_local_errstat);
	}

	for_each_set_bit(regbit, &slave_global_errstat, XE_RAS_REG_SIZE)
		log_soc_error(tile, pvc_slave_global_err_reg, severity, regbit, error_id);

	xe_mmio_write32(mmio, SOC_GLOBAL_ERR_STAT_REG(slave, hw_err), slave_global_errstat);
}

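/*
 * Top-level PVC SOC error handler: reporting of the error class being
 * serviced is masked in GSYSEVTCTL while the master and slave IEH status
 * registers are decoded and cleared, then unmasked again. Correctable
 * errors are acknowledged without logging.
 */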
static void soc_hw_error_handler(struct xe_tile *tile, const enum hardware_error hw_err,
				 u32 error_id)
{
	const enum drm_xe_ras_error_severity severity = hw_err_to_severity(hw_err);
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_mmio *mmio = &tile->mmio;
	unsigned long master_global_errstat, master_local_errstat;
	u32 master, slave, regbit;
	int i;

	if (xe->info.platform != XE_PVC)
		return;

	master = SOC_PVC_MASTER_BASE;
	slave = SOC_PVC_SLAVE_BASE;

	/* Mask error type in GSYSEVTCTL so that no new errors of the type will be reported */
	for (i = 0; i < XE_SOC_NUM_IEH; i++)
		xe_mmio_write32(mmio, SOC_GSYSEVTCTL_REG(master, slave, i), ~REG_BIT(hw_err));

	if (hw_err == HARDWARE_ERROR_CORRECTABLE) {
		xe_mmio_write32(mmio, SOC_GLOBAL_ERR_STAT_REG(master, hw_err), REG_GENMASK(31, 0));
		xe_mmio_write32(mmio, SOC_LOCAL_ERR_STAT_REG(master, hw_err), REG_GENMASK(31, 0));
		xe_mmio_write32(mmio, SOC_GLOBAL_ERR_STAT_REG(slave, hw_err), REG_GENMASK(31, 0));
		xe_mmio_write32(mmio, SOC_LOCAL_ERR_STAT_REG(slave, hw_err), REG_GENMASK(31, 0));
		goto unmask_gsysevtctl;
	}

	/*
	 * Read the master global IEH error register; if BIT(1) is set, process
	 * the slave IEH first. If BIT(0) of the global error register is set,
	 * process the corresponding local error registers.
	 */
	master_global_errstat = xe_mmio_read32(mmio, SOC_GLOBAL_ERR_STAT_REG(master, hw_err));
	if (master_global_errstat & SOC_SLAVE_IEH)
		soc_slave_ieh_handler(tile, hw_err, error_id);

	if (master_global_errstat & SOC_IEH0_LOCAL_ERR_STATUS) {
		master_local_errstat = xe_mmio_read32(mmio, SOC_LOCAL_ERR_STAT_REG(master, hw_err));

		for_each_set_bit(regbit, &master_local_errstat, XE_RAS_REG_SIZE)
			log_soc_error(tile, PVC_MASTER_LOCAL_REG_INFO(hw_err), severity, regbit, error_id);

		xe_mmio_write32(mmio, SOC_LOCAL_ERR_STAT_REG(master, hw_err), master_local_errstat);
	}

	for_each_set_bit(regbit, &master_global_errstat, XE_RAS_REG_SIZE)
		log_soc_error(tile, pvc_master_global_err_reg, severity, regbit, error_id);

	xe_mmio_write32(mmio, SOC_GLOBAL_ERR_STAT_REG(master, hw_err), master_global_errstat);

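/* Unmask all hardware error classes in GSYSEVTCTL again */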
unmask_gsysevtctl:
	for (i = 0; i < XE_SOC_NUM_IEH; i++)
		xe_mmio_write32(mmio, SOC_GSYSEVTCTL_REG(master, slave, i),
				(HARDWARE_ERROR_MAX << 1) + 1);
}

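/*
 * Read DEV_ERR_STAT for the given error class under the IRQ lock, log each
 * reported error source, dispatch to the CSC/GT/SOC handlers and clear the
 * register.
 */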
static void hw_error_source_handler(struct xe_tile *tile, const enum hardware_error hw_err)
{
	const enum drm_xe_ras_error_severity severity = hw_err_to_severity(hw_err);
	const char *severity_str = error_severity[severity];
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_drm_ras *ras = &xe->ras;
	struct xe_drm_ras_counter *info = ras->info[severity];
	unsigned long flags, err_src;
	u32 err_bit;

	if (!IS_DGFX(xe))
		return;

	spin_lock_irqsave(&xe->irq.lock, flags);
	err_src = xe_mmio_read32(&tile->mmio, DEV_ERR_STAT_REG(hw_err));
	if (!err_src) {
		drm_err_ratelimited(&xe->drm, HW_ERR "Tile%d reported %s DEV_ERR_STAT register blank!\n",
				    tile->id, severity_str);
		goto unlock;
	}

	/*
	 * On encountering CSC firmware errors, the graphics device becomes
	 * unrecoverable, so return immediately on error. The only way to recover
	 * from these errors is a firmware flash. The device will enter Runtime
	 * Survivability mode when such errors are detected.
	 */
	if (err_src & REG_BIT(XE_CSC_ERROR)) {
		csc_hw_error_handler(tile, hw_err);
		goto clear_reg;
	}

	if (!info)
		goto clear_reg;

	for_each_set_bit(err_bit, &err_src, XE_RAS_REG_SIZE) {
		const char *name;
		u32 error_id;

		/* Check error bit is within bounds */
		if (err_bit >= ARRAY_SIZE(xe_hw_error_map))
			break;

		error_id = xe_hw_error_map[err_bit];

		/* Check error component is within max */
		if (!error_id || error_id >= DRM_XE_RAS_ERR_COMP_MAX)
			continue;

		name = info[error_id].name;
		if (!name)
			continue;

		if (severity == DRM_XE_RAS_ERR_SEV_CORRECTABLE) {
			drm_warn(&xe->drm, HW_ERR
				 "TILE%d reported %s %s, bit[%d] is set\n",
				 tile->id, name, severity_str, err_bit);
		} else {
			drm_err_ratelimited(&xe->drm, HW_ERR
					    "TILE%d reported %s %s, bit[%d] is set\n",
					    tile->id, name, severity_str, err_bit);
		}

		if (err_bit == XE_GT_ERROR)
			gt_hw_error_handler(tile, hw_err, error_id);
		if (err_bit == XE_SOC_ERROR)
			soc_hw_error_handler(tile, hw_err, error_id);
	}

clear_reg:
	xe_mmio_write32(&tile->mmio, DEV_ERR_STAT_REG(hw_err), err_src);
unlock:
	spin_unlock_irqrestore(&xe->irq.lock, flags);
}

/**
 * xe_hw_error_irq_handler - irq handling for hw errors
 * @tile: tile instance
 * @master_ctl: value read from master interrupt register
 *
 * Xe platforms add three error bits to the master interrupt register to support error handling.
 * These three bits are used to convey the class of error: FATAL, NONFATAL, or CORRECTABLE.
 * To process the interrupt, determine the source of the error by reading the Device Error
 * Source Register that corresponds to the class of error being serviced.
 */
void xe_hw_error_irq_handler(struct xe_tile *tile, const u32 master_ctl)
{
	enum hardware_error hw_err;

	if (fault_inject_csc_hw_error())
		schedule_work(&tile->csc_hw_error_work);

	for (hw_err = 0; hw_err < HARDWARE_ERROR_MAX; hw_err++) {
		if (master_ctl & ERROR_IRQ(hw_err))
			hw_error_source_handler(tile, hw_err);
	}
}

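/* Per-severity RAS error bookkeeping is only maintained on PVC */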
static int hw_error_info_init(struct xe_device *xe)
{
	if (xe->info.platform != XE_PVC)
		return 0;

	return xe_drm_ras_init(xe);
}

/*
 * Process hardware errors during boot
 */
static void process_hw_errors(struct xe_device *xe)
{
	struct xe_tile *tile;
	u32 master_ctl;
	u8 id;

	for_each_tile(tile, xe, id) {
		master_ctl = xe_mmio_read32(&tile->mmio, GFX_MSTR_IRQ);
		xe_hw_error_irq_handler(tile, master_ctl);
		xe_mmio_write32(&tile->mmio, GFX_MSTR_IRQ, master_ctl);
	}
}

/**
 * xe_hw_error_init - Initialize hw errors
 * @xe: xe device instance
 *
 * Initialize and check for errors that occurred during boot
 * prior to driver load.
 */
void xe_hw_error_init(struct xe_device *xe)
{
	struct xe_tile *tile = xe_device_get_root_tile(xe);
	int ret;

	if (!IS_DGFX(xe) || IS_SRIOV_VF(xe))
		return;

	INIT_WORK(&tile->csc_hw_error_work, csc_hw_error_work);

	ret = hw_error_info_init(xe);
	if (ret)
		drm_err(&xe->drm, "Failed to initialize XE DRM RAS (%pe)\n", ERR_PTR(ret));

	process_hw_errors(xe);
}
568