// SPDX-License-Identifier: GPL-2.0-only
/*
 * GHES/EDAC Linux driver
 *
 * Copyright (c) 2013 by Mauro Carvalho Chehab
 *
 * Red Hat Inc. http://www.redhat.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <acpi/ghes.h>
#include <linux/edac.h>
#include <linux/dmi.h>
#include "edac_module.h"
#include <ras/ras_event.h>

struct ghes_edac_pvt {
	struct list_head list;
	struct ghes *ghes;
	struct mem_ctl_info *mci;

	/* Buffers for the error handling routine */
	char other_detail[400];
	char msg[80];
};

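/*
 * All GHES instances share a single memory controller. ghes_refcount
 * counts the registered GHES devices; the MCI is created by the first
 * ghes_edac_register() call and torn down when the last reference drops.
 */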
static refcount_t ghes_refcount = REFCOUNT_INIT(0);

/*
 * Access to ghes_pvt must be protected by ghes_lock. The spinlock
 * also provides the necessary (implicit) memory barrier for the SMP
 * case to make the pointer visible on another CPU.
 */
static struct ghes_edac_pvt *ghes_pvt;

/* GHES registration mutex */
static DEFINE_MUTEX(ghes_reg_mutex);

/*
 * Sync with other, potentially concurrent callers of
 * ghes_edac_report_mem_error(). We don't know what the
 * "inventive" firmware would do.
 */
static DEFINE_SPINLOCK(ghes_lock);

/* "ghes_edac.force_load=1" skips the platform check */
static bool __read_mostly force_load;
module_param(force_load, bool, 0);

/* Memory Device - Type 17 of SMBIOS spec */
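/*
 * Note: the u8 locator/manufacturer/serial/asset/part fields below are
 * SMBIOS string indices into the entry's string table, not inline data.
 */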
struct memdev_dmi_entry {
	u8 type;
	u8 length;
	u16 handle;
	u16 phys_mem_array_handle;
	u16 mem_err_info_handle;
	u16 total_width;
	u16 data_width;
	u16 size;
	u8 form_factor;
	u8 device_set;
	u8 device_locator;
	u8 bank_locator;
	u8 memory_type;
	u16 type_detail;
	u16 speed;
	u8 manufacturer;
	u8 serial_number;
	u8 asset_tag;
	u8 part_number;
	u8 attributes;
	u32 extended_size;
	u16 conf_mem_clk_speed;
} __attribute__((__packed__));

struct ghes_edac_dimm_fill {
	struct mem_ctl_info *mci;
	unsigned int count;
};

static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
{
	int *num_dimm = arg;

	if (dh->type == DMI_ENTRY_MEM_DEVICE)
		(*num_dimm)++;
}

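/* Map an SMBIOS Type 17 handle to its EDAC DIMM index, or -1 if unknown. */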
static int get_dimm_smbios_index(struct mem_ctl_info *mci, u16 handle)
{
	struct dimm_info *dimm;

	mci_for_each_dimm(mci, dimm) {
		if (dimm->smbios_handle == handle)
			return dimm->idx;
	}

	return -1;
}

static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg)
{
	struct ghes_edac_dimm_fill *dimm_fill = arg;
	struct mem_ctl_info *mci = dimm_fill->mci;

	if (dh->type == DMI_ENTRY_MEM_DEVICE) {
		struct memdev_dmi_entry *entry = (struct memdev_dmi_entry *)dh;
		struct dimm_info *dimm = edac_get_dimm(mci, dimm_fill->count, 0, 0);
		u16 rdr_mask = BIT(7) | BIT(13);

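		/*
		 * SMBIOS Type 17 size encoding: 0xffff means unknown,
		 * 0x7fff means the size lives in the 32-bit extended_size
		 * field; otherwise bit 15 selects KiB vs. MiB units.
		 */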
		if (entry->size == 0xffff) {
			pr_info("Can't get DIMM%i size\n",
				dimm_fill->count);
			dimm->nr_pages = MiB_TO_PAGES(32);/* Unknown */
		} else if (entry->size == 0x7fff) {
			dimm->nr_pages = MiB_TO_PAGES(entry->extended_size);
		} else {
			if (entry->size & BIT(15))
				/* BIT(15) set: size is in KiB; convert to MiB */
				dimm->nr_pages = MiB_TO_PAGES((entry->size & 0x7fff) >> 10);
			else
				dimm->nr_pages = MiB_TO_PAGES(entry->size);
		}

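		/*
		 * SMBIOS memory_type codes: 0x12 DDR, 0x13 DDR2,
		 * 0x14 DDR2 FB-DIMM, 0x18 DDR3, 0x1a DDR4. type_detail
		 * bit 12 = non-volatile, bit 13 = registered (buffered),
		 * bit 7 = synchronous; rdr_mask (bits 7+13) identifies
		 * registered SDR memory.
		 */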
		switch (entry->memory_type) {
		case 0x12:
			if (entry->type_detail & BIT(13))
				dimm->mtype = MEM_RDDR;
			else
				dimm->mtype = MEM_DDR;
			break;
		case 0x13:
			if (entry->type_detail & BIT(13))
				dimm->mtype = MEM_RDDR2;
			else
				dimm->mtype = MEM_DDR2;
			break;
		case 0x14:
			dimm->mtype = MEM_FB_DDR2;
			break;
		case 0x18:
			if (entry->type_detail & BIT(12))
				dimm->mtype = MEM_NVDIMM;
			else if (entry->type_detail & BIT(13))
				dimm->mtype = MEM_RDDR3;
			else
				dimm->mtype = MEM_DDR3;
			break;
		case 0x1a:
			if (entry->type_detail & BIT(12))
				dimm->mtype = MEM_NVDIMM;
			else if (entry->type_detail & BIT(13))
				dimm->mtype = MEM_RDDR4;
			else
				dimm->mtype = MEM_DDR4;
			break;
		default:
			if (entry->type_detail & BIT(6))
				dimm->mtype = MEM_RMBS;
			else if ((entry->type_detail & rdr_mask) == rdr_mask)
				dimm->mtype = MEM_RDR;
			else if (entry->type_detail & BIT(7))
				dimm->mtype = MEM_SDR;
			else if (entry->type_detail & BIT(9))
				dimm->mtype = MEM_EDO;
			else
				dimm->mtype = MEM_UNKNOWN;
		}

		/*
		 * We can only detect whether the DIMM carries extra check
		 * bits (total width > data width); assume SECDED when it
		 * does.
		 */
		if (entry->total_width == entry->data_width)
			dimm->edac_mode = EDAC_NONE;
		else
			dimm->edac_mode = EDAC_SECDED;

		dimm->dtype = DEV_UNKNOWN;
		dimm->grain = 128;		/* Likely the worst case */

		/*
		 * FIXME: It shouldn't be hard to also fill the DIMM labels
		 */

		if (dimm->nr_pages) {
			edac_dbg(1, "DIMM%i: %s size = %d MB%s\n",
				dimm_fill->count, edac_mem_types[dimm->mtype],
				PAGES_TO_MiB(dimm->nr_pages),
				(dimm->edac_mode != EDAC_NONE) ? "(ECC)" : "");
			edac_dbg(2, "\ttype %d, detail 0x%02x, width %d(total %d)\n",
				entry->memory_type, entry->type_detail,
				entry->total_width, entry->data_width);
		}

		dimm->smbios_handle = entry->handle;

		dimm_fill->count++;
	}
}

void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
{
	struct edac_raw_error_desc *e;
	struct mem_ctl_info *mci;
	struct ghes_edac_pvt *pvt;
	unsigned long flags;
	char *p;

	/*
	 * We can do the locking below because GHES defers error processing
	 * from NMI to IRQ context. Whenever that changes, we'd at least
	 * know.
	 */
	if (WARN_ON_ONCE(in_nmi()))
		return;

	spin_lock_irqsave(&ghes_lock, flags);

	pvt = ghes_pvt;
	if (!pvt)
		goto unlock;

	mci = pvt->mci;
	e = &mci->error_desc;

	/* Clean the error report buffer */
	memset(e, 0, sizeof(*e));
	e->error_count = 1;
	e->grain = 1;
	strcpy(e->label, "unknown label");
	e->msg = pvt->msg;
	e->other_detail = pvt->other_detail;
	e->top_layer = -1;
	e->mid_layer = -1;
	e->low_layer = -1;
	*pvt->other_detail = '\0';
	*pvt->msg = '\0';

	switch (sev) {
	case GHES_SEV_CORRECTED:
		e->type = HW_EVENT_ERR_CORRECTED;
		break;
	case GHES_SEV_RECOVERABLE:
		e->type = HW_EVENT_ERR_UNCORRECTED;
		break;
	case GHES_SEV_PANIC:
		e->type = HW_EVENT_ERR_FATAL;
		break;
	default:
	case GHES_SEV_NO:
		e->type = HW_EVENT_ERR_INFO;
	}

	edac_dbg(1, "error validation_bits: 0x%08llx\n",
		 (long long)mem_err->validation_bits);

	/* Error type, mapped on e->msg */
	if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
		p = pvt->msg;
		switch (mem_err->error_type) {
		case 0:
			p += sprintf(p, "Unknown");
			break;
		case 1:
			p += sprintf(p, "No error");
			break;
		case 2:
			p += sprintf(p, "Single-bit ECC");
			break;
		case 3:
			p += sprintf(p, "Multi-bit ECC");
			break;
		case 4:
			p += sprintf(p, "Single-symbol ChipKill ECC");
			break;
		case 5:
			p += sprintf(p, "Multi-symbol ChipKill ECC");
			break;
		case 6:
			p += sprintf(p, "Master abort");
			break;
		case 7:
			p += sprintf(p, "Target abort");
			break;
		case 8:
			p += sprintf(p, "Parity Error");
			break;
		case 9:
			p += sprintf(p, "Watchdog timeout");
			break;
		case 10:
			p += sprintf(p, "Invalid address");
			break;
		case 11:
			p += sprintf(p, "Mirror Broken");
			break;
		case 12:
			p += sprintf(p, "Memory Sparing");
			break;
		case 13:
			p += sprintf(p, "Scrub corrected error");
			break;
		case 14:
			p += sprintf(p, "Scrub uncorrected error");
			break;
		case 15:
			p += sprintf(p, "Physical Memory Map-out event");
			break;
		default:
			p += sprintf(p, "reserved error (%d)",
				     mem_err->error_type);
		}
	} else {
		strcpy(pvt->msg, "unknown error");
	}

	/* Error address */
	if (mem_err->validation_bits & CPER_MEM_VALID_PA) {
		e->page_frame_number = PHYS_PFN(mem_err->physical_addr);
		e->offset_in_page = offset_in_page(mem_err->physical_addr);
	}

	/* Error grain */
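	/*
	 * physical_addr_mask has the valid address bits set, so its two's
	 * complement (~mask + 1) is the granularity to which the reported
	 * physical address can be trusted.
	 */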
	if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
		e->grain = ~mem_err->physical_addr_mask + 1;

	/* Memory error location, mapped on e->location */
	p = e->location;
	if (mem_err->validation_bits & CPER_MEM_VALID_NODE)
		p += sprintf(p, "node:%d ", mem_err->node);
	if (mem_err->validation_bits & CPER_MEM_VALID_CARD)
		p += sprintf(p, "card:%d ", mem_err->card);
	if (mem_err->validation_bits & CPER_MEM_VALID_MODULE)
		p += sprintf(p, "module:%d ", mem_err->module);
	if (mem_err->validation_bits & CPER_MEM_VALID_RANK_NUMBER)
		p += sprintf(p, "rank:%d ", mem_err->rank);
	if (mem_err->validation_bits & CPER_MEM_VALID_BANK)
		p += sprintf(p, "bank:%d ", mem_err->bank);
	if (mem_err->validation_bits & CPER_MEM_VALID_ROW)
		p += sprintf(p, "row:%d ", mem_err->row);
	if (mem_err->validation_bits & CPER_MEM_VALID_COLUMN)
		p += sprintf(p, "col:%d ", mem_err->column);
	if (mem_err->validation_bits & CPER_MEM_VALID_BIT_POSITION)
		p += sprintf(p, "bit_pos:%d ", mem_err->bit_pos);
	if (mem_err->validation_bits & CPER_MEM_VALID_MODULE_HANDLE) {
		const char *bank = NULL, *device = NULL;
		int index = -1;

		dmi_memdev_name(mem_err->mem_dev_handle, &bank, &device);
		if (bank != NULL && device != NULL)
			p += sprintf(p, "DIMM location:%s %s ", bank, device);
		else
			p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
				     mem_err->mem_dev_handle);

		index = get_dimm_smbios_index(mci, mem_err->mem_dev_handle);
		if (index >= 0)
			e->top_layer = index;
	}
	if (p > e->location)
		*(p - 1) = '\0';

	/* All other fields are mapped on e->other_detail */
	p = pvt->other_detail;
	p += snprintf(p, sizeof(pvt->other_detail),
		"APEI location: %s ", e->location);
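	/*
	 * Note: only the snprintf() above is explicitly bounded; the
	 * sprintf() calls below assume the remaining detail fits in the
	 * 400-byte other_detail buffer.
	 */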
	if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_STATUS) {
		u64 status = mem_err->error_status;

		p += sprintf(p, "status(0x%016llx): ", (long long)status);
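		/* Bits 15:8 of the CPER error status hold the error type */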
		switch ((status >> 8) & 0xff) {
		case 1:
			p += sprintf(p, "Error detected internal to the component ");
			break;
		case 16:
			p += sprintf(p, "Error detected in the bus ");
			break;
		case 4:
			p += sprintf(p, "Storage error in DRAM memory ");
			break;
		case 5:
			p += sprintf(p, "Storage error in TLB ");
			break;
		case 6:
			p += sprintf(p, "Storage error in cache ");
			break;
		case 7:
			p += sprintf(p, "Error in one or more functional units ");
			break;
		case 8:
			p += sprintf(p, "Component failed self test ");
			break;
		case 9:
			p += sprintf(p, "Overflow or underflow of internal queue ");
			break;
		case 17:
			p += sprintf(p, "Virtual address not found on IO-TLB or IO-PDIR ");
			break;
		case 18:
			p += sprintf(p, "Improper access error ");
			break;
		case 19:
			p += sprintf(p, "Access to a memory address which is not mapped to any component ");
			break;
		case 20:
			p += sprintf(p, "Loss of Lockstep ");
			break;
		case 21:
			p += sprintf(p, "Response not associated with a request ");
			break;
		case 22:
			p += sprintf(p, "Bus parity error - must also set the A, C, or D Bits ");
			break;
		case 23:
			p += sprintf(p, "Detection of a PATH_ERROR ");
			break;
		case 25:
			p += sprintf(p, "Bus operation timeout ");
			break;
		case 26:
			p += sprintf(p, "A read was issued to data that has been poisoned ");
			break;
		default:
			p += sprintf(p, "reserved ");
			break;
		}
	}
	if (mem_err->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
		p += sprintf(p, "requestorID: 0x%016llx ",
			     (long long)mem_err->requestor_id);
	if (mem_err->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
		p += sprintf(p, "responderID: 0x%016llx ",
			     (long long)mem_err->responder_id);
	if (mem_err->validation_bits & CPER_MEM_VALID_TARGET_ID)
		p += sprintf(p, "targetID: 0x%016llx ",
			     (long long)mem_err->target_id);
	if (p > pvt->other_detail)
		*(p - 1) = '\0';

	edac_raw_mc_handle_error(e);

unlock:
	spin_unlock_irqrestore(&ghes_lock, flags);
}

/*
 * Known systems on which it is safe to enable this module.
 */
static struct acpi_platform_list plat_list[] = {
	{"HPE   ", "Server  ", 0, ACPI_SIG_FADT, all_versions},
	{ } /* End */
};

int ghes_edac_register(struct ghes *ghes, struct device *dev)
{
	bool fake = false;
	int rc = 0, num_dimm = 0;
	struct mem_ctl_info *mci;
	struct ghes_edac_pvt *pvt;
	struct edac_mc_layer layers[1];
	struct ghes_edac_dimm_fill dimm_fill;
	unsigned long flags;
	int idx = -1;

	if (IS_ENABLED(CONFIG_X86)) {
		/* Check if safe to enable on this system */
		idx = acpi_match_platform_list(plat_list);
		if (!force_load && idx < 0)
			return -ENODEV;
	} else {
		idx = 0;
	}

	/* finish another registration/unregistration instance first */
	mutex_lock(&ghes_reg_mutex);

	/*
	 * We have only one logical memory controller to which all DIMMs
	 * belong; if it is already registered, just take a reference on it.
	 */
	if (refcount_inc_not_zero(&ghes_refcount))
		goto unlock;

	/* Get the number of DIMMs */
	dmi_walk(ghes_edac_count_dimms, &num_dimm);

	/* Check if we've got a bogus BIOS */
	if (num_dimm == 0) {
		fake = true;
		num_dimm = 1;
	}

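	/*
	 * Use a single ALL_MEM layer: GHES cannot map errors onto a real
	 * csrow/channel topology, so all DIMMs hang off one virtual layer.
	 */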
	layers[0].type = EDAC_MC_LAYER_ALL_MEM;
	layers[0].size = num_dimm;
	layers[0].is_virt_csrow = true;

	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_edac_pvt));
	if (!mci) {
		pr_info("Can't allocate memory for EDAC data\n");
		rc = -ENOMEM;
		goto unlock;
	}

	pvt		= mci->pvt_info;
	pvt->ghes	= ghes;
	pvt->mci	= mci;

	mci->pdev = dev;
	mci->mtype_cap = MEM_FLAG_EMPTY;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "ghes_edac.c";
	mci->ctl_name = "ghes_edac";
	mci->dev_name = "ghes";

	if (fake) {
		pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMs.\n");
		pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n");
		pr_info("work on such a system. Use this driver with caution.\n");
	} else if (idx < 0) {
		pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n");
		pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n");
		pr_info("So, the end result of using this driver varies from vendor to vendor.\n");
		pr_info("If you find incorrect reports, please contact your hardware vendor\n");
		pr_info("to correct its BIOS.\n");
		pr_info("This system has %d DIMM sockets.\n", num_dimm);
	}

	if (!fake) {
		dimm_fill.count = 0;
		dimm_fill.mci = mci;
		dmi_walk(ghes_edac_dmidecode, &dimm_fill);
	} else {
		struct dimm_info *dimm = edac_get_dimm(mci, 0, 0, 0);

		dimm->nr_pages = 1;
		dimm->grain = 128;
		dimm->mtype = MEM_UNKNOWN;
		dimm->dtype = DEV_UNKNOWN;
		dimm->edac_mode = EDAC_SECDED;
	}

	rc = edac_mc_add_mc(mci);
	if (rc < 0) {
		pr_info("Can't register at EDAC core\n");
		edac_mc_free(mci);
		rc = -ENODEV;
		goto unlock;
	}

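	/*
	 * Publish the private context under ghes_lock so that the error
	 * reporting path sees a fully initialized MCI.
	 */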
	spin_lock_irqsave(&ghes_lock, flags);
	ghes_pvt = pvt;
	spin_unlock_irqrestore(&ghes_lock, flags);

	/* only set on success */
	refcount_set(&ghes_refcount, 1);

unlock:
	mutex_unlock(&ghes_reg_mutex);

	return rc;
}

void ghes_edac_unregister(struct ghes *ghes)
{
	struct mem_ctl_info *mci;
	unsigned long flags;

	mutex_lock(&ghes_reg_mutex);

	if (!refcount_dec_and_test(&ghes_refcount))
		goto unlock;

	/*
	 * Wait for the IRQ handler to finish: taking ghes_lock ensures
	 * ghes_edac_report_mem_error() is not running, and clearing
	 * ghes_pvt makes later reports bail out early.
	 */
	spin_lock_irqsave(&ghes_lock, flags);
	mci = ghes_pvt ? ghes_pvt->mci : NULL;
	ghes_pvt = NULL;
	spin_unlock_irqrestore(&ghes_lock, flags);

	if (!mci)
		goto unlock;

	mci = edac_mc_del_mc(mci->pdev);
	if (mci)
		edac_mc_free(mci);

unlock:
	mutex_unlock(&ghes_reg_mutex);
}