xref: /linux/drivers/acpi/apei/ghes.c (revision 8886640dade4ae2595fcdce511c8bcc716aa47d3)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * APEI Generic Hardware Error Source support
4  *
5  * Generic Hardware Error Source (GHES) provides a way to report
6  * platform hardware errors (such as those from the chipset). It works
7  * in the so-called "Firmware First" mode: hardware errors are reported
8  * to the firmware first, which then reports them to Linux. This way,
9  * the firmware can inspect non-standard hardware error registers or
10  * non-standard hardware links to produce richer hardware error
11  * information for Linux.
12  *
13  * For more information about Generic Hardware Error Source, please
14  * refer to ACPI Specification version 4.0, section 17.3.2.6
15  *
16  * Copyright 2010,2011 Intel Corp.
17  *   Author: Huang Ying <ying.huang@intel.com>
18  */
19 
20 #include <linux/arm_sdei.h>
21 #include <linux/kernel.h>
22 #include <linux/moduleparam.h>
23 #include <linux/init.h>
24 #include <linux/acpi.h>
25 #include <linux/io.h>
26 #include <linux/interrupt.h>
27 #include <linux/timer.h>
28 #include <linux/cper.h>
29 #include <linux/cxl-event.h>
30 #include <linux/platform_device.h>
31 #include <linux/mutex.h>
32 #include <linux/ratelimit.h>
33 #include <linux/vmalloc.h>
34 #include <linux/irq_work.h>
35 #include <linux/llist.h>
36 #include <linux/genalloc.h>
37 #include <linux/pci.h>
38 #include <linux/pfn.h>
39 #include <linux/aer.h>
40 #include <linux/nmi.h>
41 #include <linux/sched/clock.h>
42 #include <linux/uuid.h>
43 #include <linux/ras.h>
44 #include <linux/task_work.h>
45 
46 #include <acpi/actbl1.h>
47 #include <acpi/ghes.h>
48 #include <acpi/apei.h>
49 #include <asm/fixmap.h>
50 #include <asm/tlbflush.h>
51 #include <ras/ras_event.h>
52 
53 #include "apei-internal.h"
54 
55 #define GHES_PFX	"GHES: "
56 
57 #define GHES_ESTATUS_MAX_SIZE		65536
58 #define GHES_ESOURCE_PREALLOC_MAX_SIZE	65536
59 
60 #define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3
61 
62 /* This is just an estimate for memory pool allocation */
63 #define GHES_ESTATUS_CACHE_AVG_SIZE	512
64 
65 #define GHES_ESTATUS_CACHES_SIZE	4
66 
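/* An estatus cache entry suppresses duplicate reports for at most 10 seconds. */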
67 #define GHES_ESTATUS_IN_CACHE_MAX_NSEC	10000000000ULL
68 /* Prevent too many caches from being allocated because of RCU */
69 #define GHES_ESTATUS_CACHE_ALLOCED_MAX	(GHES_ESTATUS_CACHES_SIZE * 3 / 2)
70 
71 #define GHES_ESTATUS_CACHE_LEN(estatus_len)			\
72 	(sizeof(struct ghes_estatus_cache) + (estatus_len))
73 #define GHES_ESTATUS_FROM_CACHE(estatus_cache)			\
74 	((struct acpi_hest_generic_status *)				\
75 	 ((struct ghes_estatus_cache *)(estatus_cache) + 1))
76 
77 #define GHES_ESTATUS_NODE_LEN(estatus_len)			\
78 	(sizeof(struct ghes_estatus_node) + (estatus_len))
79 #define GHES_ESTATUS_FROM_NODE(estatus_node)			\
80 	((struct acpi_hest_generic_status *)				\
81 	 ((struct ghes_estatus_node *)(estatus_node) + 1))
82 
83 #define GHES_VENDOR_ENTRY_LEN(gdata_len)                               \
84 	(sizeof(struct ghes_vendor_record_entry) + (gdata_len))
85 #define GHES_GDATA_FROM_VENDOR_ENTRY(vendor_entry)                     \
86 	((struct acpi_hest_generic_data *)                              \
87 	((struct ghes_vendor_record_entry *)(vendor_entry) + 1))
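
/*
 * Each of the helpers above describes an allocation that stores its metadata
 * immediately followed by the raw error data:
 *
 *	[ ghes_estatus_cache / ghes_estatus_node / vendor_record_entry ][ CPER data ]
 *
 * The *_LEN() macros size the whole allocation and the *_FROM_*() macros skip
 * past the metadata to reach the data.
 */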
88 
89 /*
90  *  NMI-like notifications vary by architecture. Before the compiler can prune
91  *  unused static functions, it needs a value for these enums.
92  */
93 #ifndef CONFIG_ARM_SDE_INTERFACE
94 #define FIX_APEI_GHES_SDEI_NORMAL	__end_of_fixed_addresses
95 #define FIX_APEI_GHES_SDEI_CRITICAL	__end_of_fixed_addresses
96 #endif
97 
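/*
 * Notifier chain run for each CPER memory error section; callbacks receive
 * the GHES severity and a struct cper_sec_mem_err (see ghes_do_proc()).
 */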
98 static ATOMIC_NOTIFIER_HEAD(ghes_report_chain);
99 
100 static inline bool is_hest_type_generic_v2(struct ghes *ghes)
101 {
102 	return ghes->generic->header.type == ACPI_HEST_TYPE_GENERIC_ERROR_V2;
103 }
104 
105 /*
106  * A platform may describe one error source for the handling of synchronous
107  * errors (e.g. MCE or SEA), or for handling asynchronous errors (e.g. SCI
108  * or External Interrupt). On x86, the HEST notifications are always
109  * asynchronous, so only SEA on ARM is delivered as a synchronous
110  * notification.
111  */
112 static inline bool is_hest_sync_notify(struct ghes *ghes)
113 {
114 	u8 notify_type = ghes->generic->notify.type;
115 
116 	return notify_type == ACPI_HEST_NOTIFY_SEA;
117 }
118 
119 /*
120  * This driver isn't really modular; however, for the time being,
121  * continuing to use module_param is the easiest way to remain
122  * compatible with existing boot arg use cases.
123  */
124 bool ghes_disable;
125 module_param_named(disable, ghes_disable, bool, 0);
126 
127 /*
128  * "ghes.edac_force_enable" forcibly enables ghes_edac and skips the platform
129  * check.
130  */
131 static bool ghes_edac_force_enable;
132 module_param_named(edac_force_enable, ghes_edac_force_enable, bool, 0);
133 
134 /*
135  * All error sources notified with HED (Hardware Error Device) share a
136  * single notifier callback, so they need to be linked and checked one
137  * by one. This holds true for NMI too.
138  *
139  * RCU is used for these lists, so ghes_list_mutex is only used for
140  * list changing, not for traversing.
141  */
142 static LIST_HEAD(ghes_hed);
143 static DEFINE_MUTEX(ghes_list_mutex);
144 
145 /*
146  * A list of GHES devices which are given to the corresponding EDAC driver
147  * ghes_edac for further use.
148  */
149 static LIST_HEAD(ghes_devs);
150 static DEFINE_MUTEX(ghes_devs_mutex);
151 
152 /*
153  * The memory area used to transfer hardware error information from the
154  * BIOS to Linux can be determined only in the NMI, IRQ or timer
155  * handler, and the general ioremap can not be used in atomic context,
156  * so the fixmap is used instead.
157  *
158  * This spinlock is used to prevent the fixmap entry from being used
159  * simultaneously.
160  */
161 static DEFINE_SPINLOCK(ghes_notify_lock_irq);
162 
163 struct ghes_vendor_record_entry {
164 	struct work_struct work;
165 	int error_severity;
166 	char vendor_record[];
167 };
168 
169 static struct gen_pool *ghes_estatus_pool;
170 
171 static struct ghes_estatus_cache __rcu *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
172 static atomic_t ghes_estatus_cache_alloced;
173 
174 static int ghes_panic_timeout __read_mostly = 30;
175 
176 static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
177 {
178 	phys_addr_t paddr;
179 	pgprot_t prot;
180 
181 	paddr = PFN_PHYS(pfn);
182 	prot = arch_apei_get_mem_attribute(paddr);
183 	__set_fixmap(fixmap_idx, paddr, prot);
184 
185 	return (void __iomem *) __fix_to_virt(fixmap_idx);
186 }
187 
188 static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
189 {
190 	int _idx = virt_to_fix((unsigned long)vaddr);
191 
192 	WARN_ON_ONCE(fixmap_idx != _idx);
193 	clear_fixmap(fixmap_idx);
194 }
195 
196 int ghes_estatus_pool_init(unsigned int num_ghes)
197 {
198 	unsigned long addr, len;
199 	int rc;
200 
201 	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
202 	if (!ghes_estatus_pool)
203 		return -ENOMEM;
204 
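	/*
	 * Reserve room for the estatus throttle cache plus a worst-case error
	 * status block for every reporting source, since NMI-like contexts
	 * must allocate from this pool rather than the page allocator.
	 */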
205 	len = GHES_ESTATUS_CACHE_AVG_SIZE * GHES_ESTATUS_CACHE_ALLOCED_MAX;
206 	len += (num_ghes * GHES_ESOURCE_PREALLOC_MAX_SIZE);
207 
208 	addr = (unsigned long)vmalloc(PAGE_ALIGN(len));
209 	if (!addr)
210 		goto err_pool_alloc;
211 
212 	rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
213 	if (rc)
214 		goto err_pool_add;
215 
216 	return 0;
217 
218 err_pool_add:
219 	vfree((void *)addr);
220 
221 err_pool_alloc:
222 	gen_pool_destroy(ghes_estatus_pool);
223 
224 	return -ENOMEM;
225 }
226 
227 /**
228  * ghes_estatus_pool_region_free - free previously allocated memory
229  *				   from the ghes_estatus_pool.
230  * @addr: address of memory to free.
231  * @size: size of memory to free.
232  *
233  * Return: none.
234  */
235 void ghes_estatus_pool_region_free(unsigned long addr, u32 size)
236 {
237 	gen_pool_free(ghes_estatus_pool, addr, size);
238 }
239 EXPORT_SYMBOL_GPL(ghes_estatus_pool_region_free);
240 
241 static int map_gen_v2(struct ghes *ghes)
242 {
243 	return apei_map_generic_address(&ghes->generic_v2->read_ack_register);
244 }
245 
246 static void unmap_gen_v2(struct ghes *ghes)
247 {
248 	apei_unmap_generic_address(&ghes->generic_v2->read_ack_register);
249 }
250 
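/*
 * GHESv2 requires the OS to acknowledge that it has consumed an error status
 * block by updating the Read Ack register: the bits in read_ack_preserve are
 * kept and the read_ack_write value is ORed in at the register's bit offset.
 */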
251 static void ghes_ack_error(struct acpi_hest_generic_v2 *gv2)
252 {
253 	int rc;
254 	u64 val = 0;
255 
256 	rc = apei_read(&val, &gv2->read_ack_register);
257 	if (rc)
258 		return;
259 
260 	val &= gv2->read_ack_preserve << gv2->read_ack_register.bit_offset;
261 	val |= gv2->read_ack_write    << gv2->read_ack_register.bit_offset;
262 
263 	apei_write(val, &gv2->read_ack_register);
264 }
265 
266 static struct ghes *ghes_new(struct acpi_hest_generic *generic)
267 {
268 	struct ghes *ghes;
269 	unsigned int error_block_length;
270 	int rc;
271 
272 	ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
273 	if (!ghes)
274 		return ERR_PTR(-ENOMEM);
275 
276 	ghes->generic = generic;
277 	if (is_hest_type_generic_v2(ghes)) {
278 		rc = map_gen_v2(ghes);
279 		if (rc)
280 			goto err_free;
281 	}
282 
283 	rc = apei_map_generic_address(&generic->error_status_address);
284 	if (rc)
285 		goto err_unmap_read_ack_addr;
286 	error_block_length = generic->error_block_length;
287 	if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
288 		pr_warn(FW_WARN GHES_PFX
289 			"Error status block length is too long: %u for "
290 			"generic hardware error source: %d.\n",
291 			error_block_length, generic->header.source_id);
292 		error_block_length = GHES_ESTATUS_MAX_SIZE;
293 	}
294 	ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
295 	if (!ghes->estatus) {
296 		rc = -ENOMEM;
297 		goto err_unmap_status_addr;
298 	}
299 
300 	return ghes;
301 
302 err_unmap_status_addr:
303 	apei_unmap_generic_address(&generic->error_status_address);
304 err_unmap_read_ack_addr:
305 	if (is_hest_type_generic_v2(ghes))
306 		unmap_gen_v2(ghes);
307 err_free:
308 	kfree(ghes);
309 	return ERR_PTR(rc);
310 }
311 
312 static void ghes_fini(struct ghes *ghes)
313 {
314 	kfree(ghes->estatus);
315 	apei_unmap_generic_address(&ghes->generic->error_status_address);
316 	if (is_hest_type_generic_v2(ghes))
317 		unmap_gen_v2(ghes);
318 }
319 
320 static inline int ghes_severity(int severity)
321 {
322 	switch (severity) {
323 	case CPER_SEV_INFORMATIONAL:
324 		return GHES_SEV_NO;
325 	case CPER_SEV_CORRECTED:
326 		return GHES_SEV_CORRECTED;
327 	case CPER_SEV_RECOVERABLE:
328 		return GHES_SEV_RECOVERABLE;
329 	case CPER_SEV_FATAL:
330 		return GHES_SEV_PANIC;
331 	default:
332 		/* Unknown, go panic */
333 		return GHES_SEV_PANIC;
334 	}
335 }
336 
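/*
 * Copy to/from a physical buffer through the fixmap one page at a time, since
 * each notification context has only a single fixmap slot available.
 */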
337 static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
338 				  int from_phys,
339 				  enum fixed_addresses fixmap_idx)
340 {
341 	void __iomem *vaddr;
342 	u64 offset;
343 	u32 trunk;
344 
345 	while (len > 0) {
346 		offset = paddr - (paddr & PAGE_MASK);
347 		vaddr = ghes_map(PHYS_PFN(paddr), fixmap_idx);
348 		trunk = PAGE_SIZE - offset;
349 		trunk = min(trunk, len);
350 		if (from_phys)
351 			memcpy_fromio(buffer, vaddr + offset, trunk);
352 		else
353 			memcpy_toio(vaddr + offset, buffer, trunk);
354 		len -= trunk;
355 		paddr += trunk;
356 		buffer += trunk;
357 		ghes_unmap(vaddr, fixmap_idx);
358 	}
359 }
360 
361 /* Check the top-level record header has an appropriate size. */
362 static int __ghes_check_estatus(struct ghes *ghes,
363 				struct acpi_hest_generic_status *estatus)
364 {
365 	u32 len = cper_estatus_len(estatus);
366 
367 	if (len < sizeof(*estatus)) {
368 		pr_warn_ratelimited(FW_WARN GHES_PFX "Truncated error status block!\n");
369 		return -EIO;
370 	}
371 
372 	if (len > ghes->generic->error_block_length) {
373 		pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid error status block length!\n");
374 		return -EIO;
375 	}
376 
377 	if (cper_estatus_check_header(estatus)) {
378 		pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid CPER header!\n");
379 		return -EIO;
380 	}
381 
382 	return 0;
383 }
384 
385 /* Read the CPER block header into estatus and return its address in buf_paddr. */
386 static int __ghes_peek_estatus(struct ghes *ghes,
387 			       struct acpi_hest_generic_status *estatus,
388 			       u64 *buf_paddr, enum fixed_addresses fixmap_idx)
389 {
390 	struct acpi_hest_generic *g = ghes->generic;
391 	int rc;
392 
393 	rc = apei_read(buf_paddr, &g->error_status_address);
394 	if (rc) {
395 		*buf_paddr = 0;
396 		pr_warn_ratelimited(FW_WARN GHES_PFX
397 "Failed to read error status block address for hardware error source: %d.\n",
398 				   g->header.source_id);
399 		return -EIO;
400 	}
401 	if (!*buf_paddr)
402 		return -ENOENT;
403 
404 	ghes_copy_tofrom_phys(estatus, *buf_paddr, sizeof(*estatus), 1,
405 			      fixmap_idx);
406 	if (!estatus->block_status) {
407 		*buf_paddr = 0;
408 		return -ENOENT;
409 	}
410 
411 	return 0;
412 }
413 
414 static int __ghes_read_estatus(struct acpi_hest_generic_status *estatus,
415 			       u64 buf_paddr, enum fixed_addresses fixmap_idx,
416 			       size_t buf_len)
417 {
418 	ghes_copy_tofrom_phys(estatus, buf_paddr, buf_len, 1, fixmap_idx);
419 	if (cper_estatus_check(estatus)) {
420 		pr_warn_ratelimited(FW_WARN GHES_PFX
421 				    "Failed to read error status block!\n");
422 		return -EIO;
423 	}
424 
425 	return 0;
426 }
427 
428 static int ghes_read_estatus(struct ghes *ghes,
429 			     struct acpi_hest_generic_status *estatus,
430 			     u64 *buf_paddr, enum fixed_addresses fixmap_idx)
431 {
432 	int rc;
433 
434 	rc = __ghes_peek_estatus(ghes, estatus, buf_paddr, fixmap_idx);
435 	if (rc)
436 		return rc;
437 
438 	rc = __ghes_check_estatus(ghes, estatus);
439 	if (rc)
440 		return rc;
441 
442 	return __ghes_read_estatus(estatus, *buf_paddr, fixmap_idx,
443 				   cper_estatus_len(estatus));
444 }
445 
446 static void ghes_clear_estatus(struct ghes *ghes,
447 			       struct acpi_hest_generic_status *estatus,
448 			       u64 buf_paddr, enum fixed_addresses fixmap_idx)
449 {
450 	estatus->block_status = 0;
451 
452 	if (!buf_paddr)
453 		return;
454 
455 	ghes_copy_tofrom_phys(estatus, buf_paddr,
456 			      sizeof(estatus->block_status), 0,
457 			      fixmap_idx);
458 
459 	/*
460 	 * GHESv2 type HEST entries introduce support for error acknowledgment,
461 	 * so only acknowledge the error if this support is present.
462 	 */
463 	if (is_hest_type_generic_v2(ghes))
464 		ghes_ack_error(ghes->generic_v2);
465 }
466 
467 /*
468  * Called as task_work before returning to user-space.
469  * Ensure any queued work has been done before we return to the context that
470  * triggered the notification.
471  */
472 static void ghes_kick_task_work(struct callback_head *head)
473 {
474 	struct acpi_hest_generic_status *estatus;
475 	struct ghes_estatus_node *estatus_node;
476 	u32 node_len;
477 
478 	estatus_node = container_of(head, struct ghes_estatus_node, task_work);
479 	if (IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
480 		memory_failure_queue_kick(estatus_node->task_work_cpu);
481 
482 	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
483 	node_len = GHES_ESTATUS_NODE_LEN(cper_estatus_len(estatus));
484 	gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
485 }
486 
487 static bool ghes_do_memory_failure(u64 physical_addr, int flags)
488 {
489 	unsigned long pfn;
490 
491 	if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
492 		return false;
493 
494 	pfn = PHYS_PFN(physical_addr);
495 	if (!pfn_valid(pfn) && !arch_is_platform_page(physical_addr)) {
496 		pr_warn_ratelimited(FW_WARN GHES_PFX
497 		"Invalid address in generic error data: %#llx\n",
498 		physical_addr);
499 		return false;
500 	}
501 
502 	memory_failure_queue(pfn, flags);
503 	return true;
504 }
505 
506 static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
507 				       int sev, bool sync)
508 {
509 	int flags = -1;
510 	int sec_sev = ghes_severity(gdata->error_severity);
511 	struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
512 
513 	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
514 		return false;
515 
516 	/* Only the following two cases can be handled properly for now */
517 	if (sec_sev == GHES_SEV_CORRECTED &&
518 	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
519 		flags = MF_SOFT_OFFLINE;
520 	if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
521 		flags = sync ? MF_ACTION_REQUIRED : 0;
522 
523 	if (flags != -1)
524 		return ghes_do_memory_failure(mem_err->physical_addr, flags);
525 
526 	return false;
527 }
528 
529 static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
530 				       int sev, bool sync)
531 {
532 	struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
533 	int flags = sync ? MF_ACTION_REQUIRED : 0;
534 	bool queued = false;
535 	int sec_sev, i;
536 	char *p;
537 
538 	log_arm_hw_error(err);
539 
540 	sec_sev = ghes_severity(gdata->error_severity);
541 	if (sev != GHES_SEV_RECOVERABLE || sec_sev != GHES_SEV_RECOVERABLE)
542 		return false;
543 
544 	p = (char *)(err + 1);
545 	for (i = 0; i < err->err_info_num; i++) {
546 		struct cper_arm_err_info *err_info = (struct cper_arm_err_info *)p;
547 		bool is_cache = (err_info->type == CPER_ARM_CACHE_ERROR);
548 		bool has_pa = (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR);
549 		const char *error_type = "unknown error";
550 
551 		/*
552 		 * The field (err_info->error_info & BIT(26)) is hard-coded to 1 in
553 		 * some old HiSilicon Kunpeng920 firmware. We assume that firmware
554 		 * won't mix corrected errors into an uncorrected section, so
555 		 * 'corrected' errors are not filtered out here.
556 		 */
557 		if (is_cache && has_pa) {
558 			queued = ghes_do_memory_failure(err_info->physical_fault_addr, flags);
559 			p += err_info->length;
560 			continue;
561 		}
562 
563 		if (err_info->type < ARRAY_SIZE(cper_proc_error_type_strs))
564 			error_type = cper_proc_error_type_strs[err_info->type];
565 
566 		pr_warn_ratelimited(FW_WARN GHES_PFX
567 				    "Unhandled processor error type: %s\n",
568 				    error_type);
569 		p += err_info->length;
570 	}
571 
572 	return queued;
573 }
574 
575 /*
576  * PCIe AER errors need to be sent to the AER driver for reporting and
577  * recovery. The GHES severities map to the following AER severities and
578  * require the following handling:
579  *
580  * GHES_SEV_CORRECTED -> AER_CORRECTABLE
581  *     These need to be reported by the AER driver but no recovery is
582  *     necessary.
583  * GHES_SEV_RECOVERABLE -> AER_NONFATAL
584  * GHES_SEV_RECOVERABLE && CPER_SEC_RESET -> AER_FATAL
585  *     These both need to be reported and recovered from by the AER driver.
586  * GHES_SEV_PANIC does not make it to this handling since the kernel must
587  *     panic.
588  */
589 static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
590 {
591 #ifdef CONFIG_ACPI_APEI_PCIEAER
592 	struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
593 
594 	if (pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
595 	    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
596 		unsigned int devfn;
597 		int aer_severity;
598 		u8 *aer_info;
599 
600 		devfn = PCI_DEVFN(pcie_err->device_id.device,
601 				  pcie_err->device_id.function);
602 		aer_severity = cper_severity_to_aer(gdata->error_severity);
603 
604 		/*
605 		 * If firmware reset the component to contain
606 		 * the error, we must reinitialize it before
607 		 * use, so treat it as a fatal AER error.
608 		 */
609 		if (gdata->flags & CPER_SEC_RESET)
610 			aer_severity = AER_FATAL;
611 
612 		aer_info = (void *)gen_pool_alloc(ghes_estatus_pool,
613 						  sizeof(struct aer_capability_regs));
614 		if (!aer_info)
615 			return;
616 		memcpy(aer_info, pcie_err->aer_info, sizeof(struct aer_capability_regs));
617 
618 		aer_recover_queue(pcie_err->device_id.segment,
619 				  pcie_err->device_id.bus,
620 				  devfn, aer_severity,
621 				  (struct aer_capability_regs *)
622 				  aer_info);
623 	}
624 #endif
625 }
626 
627 static BLOCKING_NOTIFIER_HEAD(vendor_record_notify_list);
628 
629 int ghes_register_vendor_record_notifier(struct notifier_block *nb)
630 {
631 	return blocking_notifier_chain_register(&vendor_record_notify_list, nb);
632 }
633 EXPORT_SYMBOL_GPL(ghes_register_vendor_record_notifier);
634 
635 void ghes_unregister_vendor_record_notifier(struct notifier_block *nb)
636 {
637 	blocking_notifier_chain_unregister(&vendor_record_notify_list, nb);
638 }
639 EXPORT_SYMBOL_GPL(ghes_unregister_vendor_record_notifier);
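
/*
 * A minimal notifier sketch (hypothetical consumer code, not part of this
 * file): the chain passes the GHES severity as the notifier action and the
 * vendor section's acpi_hest_generic_data as the data pointer.
 *
 *	static int my_vendor_cb(struct notifier_block *nb, unsigned long sev,
 *				void *data)
 *	{
 *		struct acpi_hest_generic_data *gdata = data;
 *
 *		... inspect gdata->section_type and its payload ...
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_vendor_cb };
 *
 *	ghes_register_vendor_record_notifier(&my_nb);
 */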
640 
641 static void ghes_vendor_record_work_func(struct work_struct *work)
642 {
643 	struct ghes_vendor_record_entry *entry;
644 	struct acpi_hest_generic_data *gdata;
645 	u32 len;
646 
647 	entry = container_of(work, struct ghes_vendor_record_entry, work);
648 	gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);
649 
650 	blocking_notifier_call_chain(&vendor_record_notify_list,
651 				     entry->error_severity, gdata);
652 
653 	len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
654 	gen_pool_free(ghes_estatus_pool, (unsigned long)entry, len);
655 }
656 
657 static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
658 					  int sev)
659 {
660 	struct acpi_hest_generic_data *copied_gdata;
661 	struct ghes_vendor_record_entry *entry;
662 	u32 len;
663 
664 	len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
665 	entry = (void *)gen_pool_alloc(ghes_estatus_pool, len);
666 	if (!entry)
667 		return;
668 
669 	copied_gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);
670 	memcpy(copied_gdata, gdata, acpi_hest_get_record_size(gdata));
671 	entry->error_severity = sev;
672 
673 	INIT_WORK(&entry->work, ghes_vendor_record_work_func);
674 	schedule_work(&entry->work);
675 }
676 
677 /*
678  * Only a single callback can be registered for CXL CPER events.
679  */
680 static DECLARE_RWSEM(cxl_cper_rw_sem);
681 static cxl_cper_callback cper_callback;
682 
683 /* CXL Event record UUIDs are formatted as GUIDs and reported in the section type field */
684 
685 /*
686  * General Media Event Record
687  * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
688  */
689 #define CPER_SEC_CXL_GEN_MEDIA_GUID					\
690 	GUID_INIT(0xfbcd0a77, 0xc260, 0x417f,				\
691 		  0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6)
692 
693 /*
694  * DRAM Event Record
695  * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
696  */
697 #define CPER_SEC_CXL_DRAM_GUID						\
698 	GUID_INIT(0x601dcbb3, 0x9c06, 0x4eab,				\
699 		  0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24)
700 
701 /*
702  * Memory Module Event Record
703  * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
704  */
705 #define CPER_SEC_CXL_MEM_MODULE_GUID					\
706 	GUID_INIT(0xfe927475, 0xdd59, 0x4339,				\
707 		  0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74)
708 
709 static void cxl_cper_post_event(enum cxl_event_type event_type,
710 				struct cxl_cper_event_rec *rec)
711 {
712 	if (rec->hdr.length <= sizeof(rec->hdr) ||
713 	    rec->hdr.length > sizeof(*rec)) {
714 		pr_err(FW_WARN "CXL CPER Invalid section length (%u)\n",
715 		       rec->hdr.length);
716 		return;
717 	}
718 
719 	if (!(rec->hdr.validation_bits & CPER_CXL_COMP_EVENT_LOG_VALID)) {
720 		pr_err(FW_WARN "CXL CPER invalid event\n");
721 		return;
722 	}
723 
724 	guard(rwsem_read)(&cxl_cper_rw_sem);
725 	if (cper_callback)
726 		cper_callback(event_type, rec);
727 }
728 
729 int cxl_cper_register_callback(cxl_cper_callback callback)
730 {
731 	guard(rwsem_write)(&cxl_cper_rw_sem);
732 	if (cper_callback)
733 		return -EINVAL;
734 	cper_callback = callback;
735 	return 0;
736 }
737 EXPORT_SYMBOL_NS_GPL(cxl_cper_register_callback, CXL);
738 
739 int cxl_cper_unregister_callback(cxl_cper_callback callback)
740 {
741 	guard(rwsem_write)(&cxl_cper_rw_sem);
742 	if (callback != cper_callback)
743 		return -EINVAL;
744 	cper_callback = NULL;
745 	return 0;
746 }
747 EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_callback, CXL);
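
/*
 * A minimal registration sketch (hypothetical CXL driver code, not part of
 * this file): the single consumer registers once and unregisters with the
 * same function pointer.
 *
 *	static void my_cxl_cb(enum cxl_event_type type,
 *			      struct cxl_cper_event_rec *rec)
 *	{
 *		... forward rec to the CXL event handling code ...
 *	}
 *
 *	cxl_cper_register_callback(my_cxl_cb);
 *	...
 *	cxl_cper_unregister_callback(my_cxl_cb);
 */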
748 
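/*
 * Walk every section of an error status block and hand it to the matching
 * handler. Returns true if memory failure recovery was queued and still needs
 * to be completed via task_work before returning to user-space.
 */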
749 static bool ghes_do_proc(struct ghes *ghes,
750 			 const struct acpi_hest_generic_status *estatus)
751 {
752 	int sev, sec_sev;
753 	struct acpi_hest_generic_data *gdata;
754 	guid_t *sec_type;
755 	const guid_t *fru_id = &guid_null;
756 	char *fru_text = "";
757 	bool queued = false;
758 	bool sync = is_hest_sync_notify(ghes);
759 
760 	sev = ghes_severity(estatus->error_severity);
761 	apei_estatus_for_each_section(estatus, gdata) {
762 		sec_type = (guid_t *)gdata->section_type;
763 		sec_sev = ghes_severity(gdata->error_severity);
764 		if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
765 			fru_id = (guid_t *)gdata->fru_id;
766 
767 		if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
768 			fru_text = gdata->fru_text;
769 
770 		if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
771 			struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
772 
773 			atomic_notifier_call_chain(&ghes_report_chain, sev, mem_err);
774 
775 			arch_apei_report_mem_error(sev, mem_err);
776 			queued = ghes_handle_memory_failure(gdata, sev, sync);
777 		}
778 		else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
779 			ghes_handle_aer(gdata);
780 		}
781 		else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
782 			queued = ghes_handle_arm_hw_error(gdata, sev, sync);
783 		} else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) {
784 			struct cxl_cper_event_rec *rec =
785 				acpi_hest_get_payload(gdata);
786 
787 			cxl_cper_post_event(CXL_CPER_EVENT_GEN_MEDIA, rec);
788 		} else if (guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID)) {
789 			struct cxl_cper_event_rec *rec =
790 				acpi_hest_get_payload(gdata);
791 
792 			cxl_cper_post_event(CXL_CPER_EVENT_DRAM, rec);
793 		} else if (guid_equal(sec_type,
794 				      &CPER_SEC_CXL_MEM_MODULE_GUID)) {
795 			struct cxl_cper_event_rec *rec =
796 				acpi_hest_get_payload(gdata);
797 
798 			cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec);
799 		} else {
800 			void *err = acpi_hest_get_payload(gdata);
801 
802 			ghes_defer_non_standard_event(gdata, sev);
803 			log_non_standard_event(sec_type, fru_id, fru_text,
804 					       sec_sev, err,
805 					       gdata->error_data_length);
806 		}
807 	}
808 
809 	return queued;
810 }
811 
812 static void __ghes_print_estatus(const char *pfx,
813 				 const struct acpi_hest_generic *generic,
814 				 const struct acpi_hest_generic_status *estatus)
815 {
816 	static atomic_t seqno;
817 	unsigned int curr_seqno;
818 	char pfx_seq[64];
819 
820 	if (pfx == NULL) {
821 		if (ghes_severity(estatus->error_severity) <=
822 		    GHES_SEV_CORRECTED)
823 			pfx = KERN_WARNING;
824 		else
825 			pfx = KERN_ERR;
826 	}
827 	curr_seqno = atomic_inc_return(&seqno);
828 	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
829 	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
830 	       pfx_seq, generic->header.source_id);
831 	cper_estatus_print(pfx_seq, estatus);
832 }
833 
834 static int ghes_print_estatus(const char *pfx,
835 			      const struct acpi_hest_generic *generic,
836 			      const struct acpi_hest_generic_status *estatus)
837 {
838 	/* Not more than 2 messages every 5 seconds */
839 	static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
840 	static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
841 	struct ratelimit_state *ratelimit;
842 
843 	if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
844 		ratelimit = &ratelimit_corrected;
845 	else
846 		ratelimit = &ratelimit_uncorrected;
847 	if (__ratelimit(ratelimit)) {
848 		__ghes_print_estatus(pfx, generic, estatus);
849 		return 1;
850 	}
851 	return 0;
852 }
853 
854 /*
855  * GHES error status reporting throttle, used to report more kinds of
856  * errors instead of just the most frequently occurring ones.
857  */
858 static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
859 {
860 	u32 len;
861 	int i, cached = 0;
862 	unsigned long long now;
863 	struct ghes_estatus_cache *cache;
864 	struct acpi_hest_generic_status *cache_estatus;
865 
866 	len = cper_estatus_len(estatus);
867 	rcu_read_lock();
868 	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
869 		cache = rcu_dereference(ghes_estatus_caches[i]);
870 		if (cache == NULL)
871 			continue;
872 		if (len != cache->estatus_len)
873 			continue;
874 		cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
875 		if (memcmp(estatus, cache_estatus, len))
876 			continue;
877 		atomic_inc(&cache->count);
878 		now = sched_clock();
879 		if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
880 			cached = 1;
881 		break;
882 	}
883 	rcu_read_unlock();
884 	return cached;
885 }
886 
887 static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
888 	struct acpi_hest_generic *generic,
889 	struct acpi_hest_generic_status *estatus)
890 {
891 	int alloced;
892 	u32 len, cache_len;
893 	struct ghes_estatus_cache *cache;
894 	struct acpi_hest_generic_status *cache_estatus;
895 
896 	alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
897 	if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
898 		atomic_dec(&ghes_estatus_cache_alloced);
899 		return NULL;
900 	}
901 	len = cper_estatus_len(estatus);
902 	cache_len = GHES_ESTATUS_CACHE_LEN(len);
903 	cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
904 	if (!cache) {
905 		atomic_dec(&ghes_estatus_cache_alloced);
906 		return NULL;
907 	}
908 	cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
909 	memcpy(cache_estatus, estatus, len);
910 	cache->estatus_len = len;
911 	atomic_set(&cache->count, 0);
912 	cache->generic = generic;
913 	cache->time_in = sched_clock();
914 	return cache;
915 }
916 
917 static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
918 {
919 	struct ghes_estatus_cache *cache;
920 	u32 len;
921 
922 	cache = container_of(head, struct ghes_estatus_cache, rcu);
923 	len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
924 	len = GHES_ESTATUS_CACHE_LEN(len);
925 	gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
926 	atomic_dec(&ghes_estatus_cache_alloced);
927 }
928 
929 static void
930 ghes_estatus_cache_add(struct acpi_hest_generic *generic,
931 		       struct acpi_hest_generic_status *estatus)
932 {
933 	unsigned long long now, duration, period, max_period = 0;
934 	struct ghes_estatus_cache *cache, *new_cache;
935 	struct ghes_estatus_cache __rcu *victim;
936 	int i, slot = -1, count;
937 
938 	new_cache = ghes_estatus_cache_alloc(generic, estatus);
939 	if (!new_cache)
940 		return;
941 
942 	rcu_read_lock();
943 	now = sched_clock();
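	/*
	 * Pick a victim slot: an empty or expired entry if one exists,
	 * otherwise the entry with the longest average interval between
	 * cache hits (duration / (count + 1)).
	 */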
944 	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
945 		cache = rcu_dereference(ghes_estatus_caches[i]);
946 		if (cache == NULL) {
947 			slot = i;
948 			break;
949 		}
950 		duration = now - cache->time_in;
951 		if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
952 			slot = i;
953 			break;
954 		}
955 		count = atomic_read(&cache->count);
956 		period = duration;
957 		do_div(period, (count + 1));
958 		if (period > max_period) {
959 			max_period = period;
960 			slot = i;
961 		}
962 	}
963 	rcu_read_unlock();
964 
965 	if (slot != -1) {
966 		/*
967 		 * Use release semantics to ensure that ghes_estatus_cached()
968 		 * running on another CPU will see the updated cache fields if
969 		 * it can see the new value of the pointer.
970 		 */
971 		victim = xchg_release(&ghes_estatus_caches[slot],
972 				      RCU_INITIALIZER(new_cache));
973 
974 		/*
975 		 * At this point, victim may point to a cached item different
976 		 * from the one based on which we selected the slot. Instead of
977 		 * going to the loop again to pick another slot, let's just
978 		 * drop the other item anyway: this may cause a false cache
979 		 * miss later on, but that won't cause any problems.
980 		 */
981 		if (victim)
982 			call_rcu(&unrcu_pointer(victim)->rcu,
983 				 ghes_estatus_cache_rcu_free);
984 	}
985 }
986 
987 static void __ghes_panic(struct ghes *ghes,
988 			 struct acpi_hest_generic_status *estatus,
989 			 u64 buf_paddr, enum fixed_addresses fixmap_idx)
990 {
991 	__ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);
992 
993 	ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
994 
995 	/* reboot to log the error! */
996 	if (!panic_timeout)
997 		panic_timeout = ghes_panic_timeout;
998 	panic("Fatal hardware error!");
999 }
1000 
1001 static int ghes_proc(struct ghes *ghes)
1002 {
1003 	struct acpi_hest_generic_status *estatus = ghes->estatus;
1004 	u64 buf_paddr;
1005 	int rc;
1006 
1007 	rc = ghes_read_estatus(ghes, estatus, &buf_paddr, FIX_APEI_GHES_IRQ);
1008 	if (rc)
1009 		goto out;
1010 
1011 	if (ghes_severity(estatus->error_severity) >= GHES_SEV_PANIC)
1012 		__ghes_panic(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);
1013 
1014 	if (!ghes_estatus_cached(estatus)) {
1015 		if (ghes_print_estatus(NULL, ghes->generic, estatus))
1016 			ghes_estatus_cache_add(ghes->generic, estatus);
1017 	}
1018 	ghes_do_proc(ghes, estatus);
1019 
1020 out:
1021 	ghes_clear_estatus(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);
1022 
1023 	return rc;
1024 }
1025 
1026 static void ghes_add_timer(struct ghes *ghes)
1027 {
1028 	struct acpi_hest_generic *g = ghes->generic;
1029 	unsigned long expire;
1030 
1031 	if (!g->notify.poll_interval) {
1032 		pr_warn(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
1033 			g->header.source_id);
1034 		return;
1035 	}
1036 	expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
1037 	ghes->timer.expires = round_jiffies_relative(expire);
1038 	add_timer(&ghes->timer);
1039 }
1040 
1041 static void ghes_poll_func(struct timer_list *t)
1042 {
1043 	struct ghes *ghes = from_timer(ghes, t, timer);
1044 	unsigned long flags;
1045 
1046 	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
1047 	ghes_proc(ghes);
1048 	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
1049 	if (!(ghes->flags & GHES_EXITING))
1050 		ghes_add_timer(ghes);
1051 }
1052 
1053 static irqreturn_t ghes_irq_func(int irq, void *data)
1054 {
1055 	struct ghes *ghes = data;
1056 	unsigned long flags;
1057 	int rc;
1058 
1059 	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
1060 	rc = ghes_proc(ghes);
1061 	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
1062 	if (rc)
1063 		return IRQ_NONE;
1064 
1065 	return IRQ_HANDLED;
1066 }
1067 
1068 static int ghes_notify_hed(struct notifier_block *this, unsigned long event,
1069 			   void *data)
1070 {
1071 	struct ghes *ghes;
1072 	unsigned long flags;
1073 	int ret = NOTIFY_DONE;
1074 
1075 	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
1076 	rcu_read_lock();
1077 	list_for_each_entry_rcu(ghes, &ghes_hed, list) {
1078 		if (!ghes_proc(ghes))
1079 			ret = NOTIFY_OK;
1080 	}
1081 	rcu_read_unlock();
1082 	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
1083 
1084 	return ret;
1085 }
1086 
1087 static struct notifier_block ghes_notifier_hed = {
1088 	.notifier_call = ghes_notify_hed,
1089 };
1090 
1091 /*
1092  * Handlers for CPER records may not be NMI safe. For example,
1093  * memory_failure_queue() takes spinlocks and calls schedule_work_on().
1094  * In any NMI-like handler, memory from ghes_estatus_pool is used to save the
1095  * estatus, which is then added to ghes_estatus_llist. irq_work_queue() causes
1096  * ghes_proc_in_irq() to run in IRQ context where each estatus in
1097  * ghes_estatus_llist is processed.
1098  *
1099  * Memory from the ghes_estatus_pool is also used with the ghes_estatus_cache
1100  * to suppress frequent messages.
1101  */
1102 static struct llist_head ghes_estatus_llist;
1103 static struct irq_work ghes_proc_irq_work;
1104 
1105 static void ghes_proc_in_irq(struct irq_work *irq_work)
1106 {
1107 	struct llist_node *llnode, *next;
1108 	struct ghes_estatus_node *estatus_node;
1109 	struct acpi_hest_generic *generic;
1110 	struct acpi_hest_generic_status *estatus;
1111 	bool task_work_pending;
1112 	u32 len, node_len;
1113 	int ret;
1114 
1115 	llnode = llist_del_all(&ghes_estatus_llist);
1116 	/*
1117 	 * The estatus entries were added to the llist in reverse time order,
1118 	 * so restore the proper order here.
1119 	 */
1120 	llnode = llist_reverse_order(llnode);
1121 	while (llnode) {
1122 		next = llnode->next;
1123 		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
1124 					   llnode);
1125 		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
1126 		len = cper_estatus_len(estatus);
1127 		node_len = GHES_ESTATUS_NODE_LEN(len);
1128 		task_work_pending = ghes_do_proc(estatus_node->ghes, estatus);
1129 		if (!ghes_estatus_cached(estatus)) {
1130 			generic = estatus_node->generic;
1131 			if (ghes_print_estatus(NULL, generic, estatus))
1132 				ghes_estatus_cache_add(generic, estatus);
1133 		}
1134 
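		/*
		 * Any queued memory failure work must complete before the task
		 * that hit the error returns to user-space, so kick it via
		 * task_work. Kernel threads (no mm) never return to user-space,
		 * so for them the node is freed immediately below.
		 */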
1135 		if (task_work_pending && current->mm) {
1136 			estatus_node->task_work.func = ghes_kick_task_work;
1137 			estatus_node->task_work_cpu = smp_processor_id();
1138 			ret = task_work_add(current, &estatus_node->task_work,
1139 					    TWA_RESUME);
1140 			if (ret)
1141 				estatus_node->task_work.func = NULL;
1142 		}
1143 
1144 		if (!estatus_node->task_work.func)
1145 			gen_pool_free(ghes_estatus_pool,
1146 				      (unsigned long)estatus_node, node_len);
1147 
1148 		llnode = next;
1149 	}
1150 }
1151 
1152 static void ghes_print_queued_estatus(void)
1153 {
1154 	struct llist_node *llnode;
1155 	struct ghes_estatus_node *estatus_node;
1156 	struct acpi_hest_generic *generic;
1157 	struct acpi_hest_generic_status *estatus;
1158 
1159 	llnode = llist_del_all(&ghes_estatus_llist);
1160 	/*
1161 	 * The estatus entries were added to the llist in reverse time order,
1162 	 * so restore the proper order here.
1163 	 */
1164 	llnode = llist_reverse_order(llnode);
1165 	while (llnode) {
1166 		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
1167 					   llnode);
1168 		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
1169 		generic = estatus_node->generic;
1170 		ghes_print_estatus(NULL, generic, estatus);
1171 		llnode = llnode->next;
1172 	}
1173 }
1174 
1175 static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
1176 				       enum fixed_addresses fixmap_idx)
1177 {
1178 	struct acpi_hest_generic_status *estatus, tmp_header;
1179 	struct ghes_estatus_node *estatus_node;
1180 	u32 len, node_len;
1181 	u64 buf_paddr;
1182 	int sev, rc;
1183 
1184 	if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG))
1185 		return -EOPNOTSUPP;
1186 
1187 	rc = __ghes_peek_estatus(ghes, &tmp_header, &buf_paddr, fixmap_idx);
1188 	if (rc) {
1189 		ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
1190 		return rc;
1191 	}
1192 
1193 	rc = __ghes_check_estatus(ghes, &tmp_header);
1194 	if (rc) {
1195 		ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
1196 		return rc;
1197 	}
1198 
1199 	len = cper_estatus_len(&tmp_header);
1200 	node_len = GHES_ESTATUS_NODE_LEN(len);
1201 	estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len);
1202 	if (!estatus_node)
1203 		return -ENOMEM;
1204 
1205 	estatus_node->ghes = ghes;
1206 	estatus_node->generic = ghes->generic;
1207 	estatus_node->task_work.func = NULL;
1208 	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
1209 
1210 	if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) {
1211 		ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
1212 		rc = -ENOENT;
1213 		goto no_work;
1214 	}
1215 
1216 	sev = ghes_severity(estatus->error_severity);
1217 	if (sev >= GHES_SEV_PANIC) {
1218 		ghes_print_queued_estatus();
1219 		__ghes_panic(ghes, estatus, buf_paddr, fixmap_idx);
1220 	}
1221 
1222 	ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
1223 
1224 	/* This error has been reported before, don't process it again. */
1225 	if (ghes_estatus_cached(estatus))
1226 		goto no_work;
1227 
1228 	llist_add(&estatus_node->llnode, &ghes_estatus_llist);
1229 
1230 	return rc;
1231 
1232 no_work:
1233 	gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
1234 		      node_len);
1235 
1236 	return rc;
1237 }
1238 
1239 static int ghes_in_nmi_spool_from_list(struct list_head *rcu_list,
1240 				       enum fixed_addresses fixmap_idx)
1241 {
1242 	int ret = -ENOENT;
1243 	struct ghes *ghes;
1244 
1245 	rcu_read_lock();
1246 	list_for_each_entry_rcu(ghes, rcu_list, list) {
1247 		if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx))
1248 			ret = 0;
1249 	}
1250 	rcu_read_unlock();
1251 
1252 	if (IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) && !ret)
1253 		irq_work_queue(&ghes_proc_irq_work);
1254 
1255 	return ret;
1256 }
1257 
1258 #ifdef CONFIG_ACPI_APEI_SEA
1259 static LIST_HEAD(ghes_sea);
1260 
1261 /*
1262  * Return 0 only if one of the SEA error sources successfully reported an error
1263  * record sent from the firmware.
1264  */
1265 int ghes_notify_sea(void)
1266 {
1267 	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sea);
1268 	int rv;
1269 
1270 	raw_spin_lock(&ghes_notify_lock_sea);
1271 	rv = ghes_in_nmi_spool_from_list(&ghes_sea, FIX_APEI_GHES_SEA);
1272 	raw_spin_unlock(&ghes_notify_lock_sea);
1273 
1274 	return rv;
1275 }
1276 
1277 static void ghes_sea_add(struct ghes *ghes)
1278 {
1279 	mutex_lock(&ghes_list_mutex);
1280 	list_add_rcu(&ghes->list, &ghes_sea);
1281 	mutex_unlock(&ghes_list_mutex);
1282 }
1283 
1284 static void ghes_sea_remove(struct ghes *ghes)
1285 {
1286 	mutex_lock(&ghes_list_mutex);
1287 	list_del_rcu(&ghes->list);
1288 	mutex_unlock(&ghes_list_mutex);
1289 	synchronize_rcu();
1290 }
1291 #else /* CONFIG_ACPI_APEI_SEA */
1292 static inline void ghes_sea_add(struct ghes *ghes) { }
1293 static inline void ghes_sea_remove(struct ghes *ghes) { }
1294 #endif /* CONFIG_ACPI_APEI_SEA */
1295 
1296 #ifdef CONFIG_HAVE_ACPI_APEI_NMI
1297 /*
1298  * An NMI may be triggered on any CPU, so ghes_in_nmi is used to
1299  * allow only one concurrent reader.
1300  */
1301 static atomic_t ghes_in_nmi = ATOMIC_INIT(0);
1302 
1303 static LIST_HEAD(ghes_nmi);
1304 
1305 static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
1306 {
1307 	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_nmi);
1308 	int ret = NMI_DONE;
1309 
1310 	if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
1311 		return ret;
1312 
1313 	raw_spin_lock(&ghes_notify_lock_nmi);
1314 	if (!ghes_in_nmi_spool_from_list(&ghes_nmi, FIX_APEI_GHES_NMI))
1315 		ret = NMI_HANDLED;
1316 	raw_spin_unlock(&ghes_notify_lock_nmi);
1317 
1318 	atomic_dec(&ghes_in_nmi);
1319 	return ret;
1320 }
1321 
1322 static void ghes_nmi_add(struct ghes *ghes)
1323 {
1324 	mutex_lock(&ghes_list_mutex);
1325 	if (list_empty(&ghes_nmi))
1326 		register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, "ghes");
1327 	list_add_rcu(&ghes->list, &ghes_nmi);
1328 	mutex_unlock(&ghes_list_mutex);
1329 }
1330 
1331 static void ghes_nmi_remove(struct ghes *ghes)
1332 {
1333 	mutex_lock(&ghes_list_mutex);
1334 	list_del_rcu(&ghes->list);
1335 	if (list_empty(&ghes_nmi))
1336 		unregister_nmi_handler(NMI_LOCAL, "ghes");
1337 	mutex_unlock(&ghes_list_mutex);
1338 	/*
1339 	 * To synchronize with the NMI handler, the ghes structure can only
1340 	 * be freed after the NMI handler finishes.
1341 	 */
1342 	synchronize_rcu();
1343 }
1344 #else /* CONFIG_HAVE_ACPI_APEI_NMI */
1345 static inline void ghes_nmi_add(struct ghes *ghes) { }
1346 static inline void ghes_nmi_remove(struct ghes *ghes) { }
1347 #endif /* CONFIG_HAVE_ACPI_APEI_NMI */
1348 
1349 static void ghes_nmi_init_cxt(void)
1350 {
1351 	init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
1352 }
1353 
1354 static int __ghes_sdei_callback(struct ghes *ghes,
1355 				enum fixed_addresses fixmap_idx)
1356 {
1357 	if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx)) {
1358 		irq_work_queue(&ghes_proc_irq_work);
1359 
1360 		return 0;
1361 	}
1362 
1363 	return -ENOENT;
1364 }
1365 
1366 static int ghes_sdei_normal_callback(u32 event_num, struct pt_regs *regs,
1367 				      void *arg)
1368 {
1369 	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_normal);
1370 	struct ghes *ghes = arg;
1371 	int err;
1372 
1373 	raw_spin_lock(&ghes_notify_lock_sdei_normal);
1374 	err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_NORMAL);
1375 	raw_spin_unlock(&ghes_notify_lock_sdei_normal);
1376 
1377 	return err;
1378 }
1379 
1380 static int ghes_sdei_critical_callback(u32 event_num, struct pt_regs *regs,
1381 				       void *arg)
1382 {
1383 	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_critical);
1384 	struct ghes *ghes = arg;
1385 	int err;
1386 
1387 	raw_spin_lock(&ghes_notify_lock_sdei_critical);
1388 	err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_CRITICAL);
1389 	raw_spin_unlock(&ghes_notify_lock_sdei_critical);
1390 
1391 	return err;
1392 }
1393 
1394 static int apei_sdei_register_ghes(struct ghes *ghes)
1395 {
1396 	if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
1397 		return -EOPNOTSUPP;
1398 
1399 	return sdei_register_ghes(ghes, ghes_sdei_normal_callback,
1400 				 ghes_sdei_critical_callback);
1401 }
1402 
1403 static int apei_sdei_unregister_ghes(struct ghes *ghes)
1404 {
1405 	if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
1406 		return -EOPNOTSUPP;
1407 
1408 	return sdei_unregister_ghes(ghes);
1409 }
1410 
1411 static int ghes_probe(struct platform_device *ghes_dev)
1412 {
1413 	struct acpi_hest_generic *generic;
1414 	struct ghes *ghes = NULL;
1415 	unsigned long flags;
1416 
1417 	int rc = -EINVAL;
1418 
1419 	generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
1420 	if (!generic->enabled)
1421 		return -ENODEV;
1422 
1423 	switch (generic->notify.type) {
1424 	case ACPI_HEST_NOTIFY_POLLED:
1425 	case ACPI_HEST_NOTIFY_EXTERNAL:
1426 	case ACPI_HEST_NOTIFY_SCI:
1427 	case ACPI_HEST_NOTIFY_GSIV:
1428 	case ACPI_HEST_NOTIFY_GPIO:
1429 		break;
1430 
1431 	case ACPI_HEST_NOTIFY_SEA:
1432 		if (!IS_ENABLED(CONFIG_ACPI_APEI_SEA)) {
1433 			pr_warn(GHES_PFX "Generic hardware error source: %d notified via SEA is not supported\n",
1434 				generic->header.source_id);
1435 			rc = -ENOTSUPP;
1436 			goto err;
1437 		}
1438 		break;
1439 	case ACPI_HEST_NOTIFY_NMI:
1440 		if (!IS_ENABLED(CONFIG_HAVE_ACPI_APEI_NMI)) {
1441 			pr_warn(GHES_PFX "Generic hardware error source: %d notified via NMI interrupt is not supported!\n",
1442 				generic->header.source_id);
1443 			goto err;
1444 		}
1445 		break;
1446 	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
1447 		if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) {
1448 			pr_warn(GHES_PFX "Generic hardware error source: %d notified via SDE Interface is not supported!\n",
1449 				generic->header.source_id);
1450 			goto err;
1451 		}
1452 		break;
1453 	case ACPI_HEST_NOTIFY_LOCAL:
1454 		pr_warn(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
1455 			generic->header.source_id);
1456 		goto err;
1457 	default:
1458 		pr_warn(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
1459 			generic->notify.type, generic->header.source_id);
1460 		goto err;
1461 	}
1462 
1463 	rc = -EIO;
1464 	if (generic->error_block_length <
1465 	    sizeof(struct acpi_hest_generic_status)) {
1466 		pr_warn(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
1467 			generic->error_block_length, generic->header.source_id);
1468 		goto err;
1469 	}
1470 	ghes = ghes_new(generic);
1471 	if (IS_ERR(ghes)) {
1472 		rc = PTR_ERR(ghes);
1473 		ghes = NULL;
1474 		goto err;
1475 	}
1476 
1477 	switch (generic->notify.type) {
1478 	case ACPI_HEST_NOTIFY_POLLED:
1479 		timer_setup(&ghes->timer, ghes_poll_func, 0);
1480 		ghes_add_timer(ghes);
1481 		break;
1482 	case ACPI_HEST_NOTIFY_EXTERNAL:
1483 		/* External interrupt vector is GSI */
1484 		rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
1485 		if (rc) {
1486 			pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
1487 			       generic->header.source_id);
1488 			goto err;
1489 		}
1490 		rc = request_irq(ghes->irq, ghes_irq_func, IRQF_SHARED,
1491 				 "GHES IRQ", ghes);
1492 		if (rc) {
1493 			pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
1494 			       generic->header.source_id);
1495 			goto err;
1496 		}
1497 		break;
1498 
1499 	case ACPI_HEST_NOTIFY_SCI:
1500 	case ACPI_HEST_NOTIFY_GSIV:
1501 	case ACPI_HEST_NOTIFY_GPIO:
1502 		mutex_lock(&ghes_list_mutex);
1503 		if (list_empty(&ghes_hed))
1504 			register_acpi_hed_notifier(&ghes_notifier_hed);
1505 		list_add_rcu(&ghes->list, &ghes_hed);
1506 		mutex_unlock(&ghes_list_mutex);
1507 		break;
1508 
1509 	case ACPI_HEST_NOTIFY_SEA:
1510 		ghes_sea_add(ghes);
1511 		break;
1512 	case ACPI_HEST_NOTIFY_NMI:
1513 		ghes_nmi_add(ghes);
1514 		break;
1515 	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
1516 		rc = apei_sdei_register_ghes(ghes);
1517 		if (rc)
1518 			goto err;
1519 		break;
1520 	default:
1521 		BUG();
1522 	}
1523 
1524 	platform_set_drvdata(ghes_dev, ghes);
1525 
1526 	ghes->dev = &ghes_dev->dev;
1527 
1528 	mutex_lock(&ghes_devs_mutex);
1529 	list_add_tail(&ghes->elist, &ghes_devs);
1530 	mutex_unlock(&ghes_devs_mutex);
1531 
1532 	/* Handle any pending errors right away */
1533 	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
1534 	ghes_proc(ghes);
1535 	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
1536 
1537 	return 0;
1538 
1539 err:
1540 	if (ghes) {
1541 		ghes_fini(ghes);
1542 		kfree(ghes);
1543 	}
1544 	return rc;
1545 }
1546 
1547 static int ghes_remove(struct platform_device *ghes_dev)
1548 {
1549 	int rc;
1550 	struct ghes *ghes;
1551 	struct acpi_hest_generic *generic;
1552 
1553 	ghes = platform_get_drvdata(ghes_dev);
1554 	generic = ghes->generic;
1555 
1556 	ghes->flags |= GHES_EXITING;
1557 	switch (generic->notify.type) {
1558 	case ACPI_HEST_NOTIFY_POLLED:
1559 		timer_shutdown_sync(&ghes->timer);
1560 		break;
1561 	case ACPI_HEST_NOTIFY_EXTERNAL:
1562 		free_irq(ghes->irq, ghes);
1563 		break;
1564 
1565 	case ACPI_HEST_NOTIFY_SCI:
1566 	case ACPI_HEST_NOTIFY_GSIV:
1567 	case ACPI_HEST_NOTIFY_GPIO:
1568 		mutex_lock(&ghes_list_mutex);
1569 		list_del_rcu(&ghes->list);
1570 		if (list_empty(&ghes_hed))
1571 			unregister_acpi_hed_notifier(&ghes_notifier_hed);
1572 		mutex_unlock(&ghes_list_mutex);
1573 		synchronize_rcu();
1574 		break;
1575 
1576 	case ACPI_HEST_NOTIFY_SEA:
1577 		ghes_sea_remove(ghes);
1578 		break;
1579 	case ACPI_HEST_NOTIFY_NMI:
1580 		ghes_nmi_remove(ghes);
1581 		break;
1582 	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
1583 		rc = apei_sdei_unregister_ghes(ghes);
1584 		if (rc)
1585 			return rc;
1586 		break;
1587 	default:
1588 		BUG();
1589 		break;
1590 	}
1591 
1592 	ghes_fini(ghes);
1593 
1594 	mutex_lock(&ghes_devs_mutex);
1595 	list_del(&ghes->elist);
1596 	mutex_unlock(&ghes_devs_mutex);
1597 
1598 	kfree(ghes);
1599 
1600 	return 0;
1601 }
1602 
1603 static struct platform_driver ghes_platform_driver = {
1604 	.driver		= {
1605 		.name	= "GHES",
1606 	},
1607 	.probe		= ghes_probe,
1608 	.remove		= ghes_remove,
1609 };
1610 
1611 void __init acpi_ghes_init(void)
1612 {
1613 	int rc;
1614 
1615 	sdei_init();
1616 
1617 	if (acpi_disabled)
1618 		return;
1619 
1620 	switch (hest_disable) {
1621 	case HEST_NOT_FOUND:
1622 		return;
1623 	case HEST_DISABLED:
1624 		pr_info(GHES_PFX "HEST is not enabled!\n");
1625 		return;
1626 	default:
1627 		break;
1628 	}
1629 
1630 	if (ghes_disable) {
1631 		pr_info(GHES_PFX "GHES is not enabled!\n");
1632 		return;
1633 	}
1634 
1635 	ghes_nmi_init_cxt();
1636 
1637 	rc = platform_driver_register(&ghes_platform_driver);
1638 	if (rc)
1639 		return;
1640 
1641 	rc = apei_osc_setup();
1642 	if (rc == 0 && osc_sb_apei_support_acked)
1643 		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
1644 	else if (rc == 0 && !osc_sb_apei_support_acked)
1645 		pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
1646 	else if (rc && osc_sb_apei_support_acked)
1647 		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
1648 	else
1649 		pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
1650 }
1651 
1652 /*
1653  * Known x86 systems that prefer GHES error reporting:
1654  */
1655 static struct acpi_platform_list plat_list[] = {
1656 	{"HPE   ", "Server  ", 0, ACPI_SIG_FADT, all_versions},
1657 	{ } /* End */
1658 };
1659 
1660 struct list_head *ghes_get_devices(void)
1661 {
1662 	int idx = -1;
1663 
1664 	if (IS_ENABLED(CONFIG_X86)) {
1665 		idx = acpi_match_platform_list(plat_list);
1666 		if (idx < 0) {
1667 			if (!ghes_edac_force_enable)
1668 				return NULL;
1669 
1670 			pr_warn_once("Force-loading ghes_edac on an unsupported platform. You're on your own!\n");
1671 		}
1672 	} else if (list_empty(&ghes_devs)) {
1673 		return NULL;
1674 	}
1675 
1676 	return &ghes_devs;
1677 }
1678 EXPORT_SYMBOL_GPL(ghes_get_devices);
1679 
1680 void ghes_register_report_chain(struct notifier_block *nb)
1681 {
1682 	atomic_notifier_chain_register(&ghes_report_chain, nb);
1683 }
1684 EXPORT_SYMBOL_GPL(ghes_register_report_chain);
1685 
1686 void ghes_unregister_report_chain(struct notifier_block *nb)
1687 {
1688 	atomic_notifier_chain_unregister(&ghes_report_chain, nb);
1689 }
1690 EXPORT_SYMBOL_GPL(ghes_unregister_report_chain);
1691