xref: /linux/drivers/acpi/apei/ghes.c (revision c48a7c44a1d02516309015b6134c9bb982e17008)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * APEI Generic Hardware Error Source support
4  *
5  * Generic Hardware Error Source provides a way to report platform
6  * hardware errors (such as those from the chipset). It works in the
7  * so-called "Firmware First" mode: hardware errors are reported to
8  * the firmware first, and the firmware then reports them to Linux.
9  * This way, the firmware can inspect non-standard hardware error
10  * registers or non-standard hardware links to produce richer
11  * hardware error information for Linux.
12  *
13  * For more information about Generic Hardware Error Source, please
14  * refer to ACPI Specification version 4.0, section 17.3.2.6
15  *
16  * Copyright 2010,2011 Intel Corp.
17  *   Author: Huang Ying <ying.huang@intel.com>
18  */
19 
20 #include <linux/arm_sdei.h>
21 #include <linux/kernel.h>
22 #include <linux/moduleparam.h>
23 #include <linux/init.h>
24 #include <linux/acpi.h>
25 #include <linux/io.h>
26 #include <linux/interrupt.h>
27 #include <linux/timer.h>
28 #include <linux/cper.h>
29 #include <linux/platform_device.h>
30 #include <linux/mutex.h>
31 #include <linux/ratelimit.h>
32 #include <linux/vmalloc.h>
33 #include <linux/irq_work.h>
34 #include <linux/llist.h>
35 #include <linux/genalloc.h>
36 #include <linux/pci.h>
37 #include <linux/pfn.h>
38 #include <linux/aer.h>
39 #include <linux/nmi.h>
40 #include <linux/sched/clock.h>
41 #include <linux/uuid.h>
42 #include <linux/ras.h>
43 #include <linux/task_work.h>
44 
45 #include <acpi/actbl1.h>
46 #include <acpi/ghes.h>
47 #include <acpi/apei.h>
48 #include <asm/fixmap.h>
49 #include <asm/tlbflush.h>
50 #include <ras/ras_event.h>
51 
52 #include "apei-internal.h"
53 
54 #define GHES_PFX	"GHES: "
55 
56 #define GHES_ESTATUS_MAX_SIZE		65536
57 #define GHES_ESOURCE_PREALLOC_MAX_SIZE	65536
58 
59 #define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3
60 
61 /* This is just an estimate used to size the memory pool */
62 #define GHES_ESTATUS_CACHE_AVG_SIZE	512
63 
64 #define GHES_ESTATUS_CACHES_SIZE	4
65 
66 #define GHES_ESTATUS_IN_CACHE_MAX_NSEC	10000000000ULL
67 /* Prevent too many caches from being allocated because of RCU */
68 #define GHES_ESTATUS_CACHE_ALLOCED_MAX	(GHES_ESTATUS_CACHES_SIZE * 3 / 2)
69 
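/*
 * The cache, node and vendor-record-entry structures below are each
 * followed directly in memory by the record they wrap; the "+ 1"
 * pointer arithmetic in these macros yields the record's address.
 */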
70 #define GHES_ESTATUS_CACHE_LEN(estatus_len)			\
71 	(sizeof(struct ghes_estatus_cache) + (estatus_len))
72 #define GHES_ESTATUS_FROM_CACHE(estatus_cache)			\
73 	((struct acpi_hest_generic_status *)				\
74 	 ((struct ghes_estatus_cache *)(estatus_cache) + 1))
75 
76 #define GHES_ESTATUS_NODE_LEN(estatus_len)			\
77 	(sizeof(struct ghes_estatus_node) + (estatus_len))
78 #define GHES_ESTATUS_FROM_NODE(estatus_node)			\
79 	((struct acpi_hest_generic_status *)				\
80 	 ((struct ghes_estatus_node *)(estatus_node) + 1))
81 
82 #define GHES_VENDOR_ENTRY_LEN(gdata_len)                               \
83 	(sizeof(struct ghes_vendor_record_entry) + (gdata_len))
84 #define GHES_GDATA_FROM_VENDOR_ENTRY(vendor_entry)                     \
85 	((struct acpi_hest_generic_data *)                              \
86 	((struct ghes_vendor_record_entry *)(vendor_entry) + 1))
87 
88 /*
89  * NMI-like notifications vary by architecture; before the compiler can prune
90  * unused static functions, it needs a value for these enums.
91  */
92 #ifndef CONFIG_ARM_SDE_INTERFACE
93 #define FIX_APEI_GHES_SDEI_NORMAL	__end_of_fixed_addresses
94 #define FIX_APEI_GHES_SDEI_CRITICAL	__end_of_fixed_addresses
95 #endif
96 
97 static ATOMIC_NOTIFIER_HEAD(ghes_report_chain);
98 
99 static inline bool is_hest_type_generic_v2(struct ghes *ghes)
100 {
101 	return ghes->generic->header.type == ACPI_HEST_TYPE_GENERIC_ERROR_V2;
102 }
103 
104 /*
105  * This driver isn't really modular; however, for the time being,
106  * continuing to use module_param is the easiest way to remain
107  * compatible with existing boot arg use cases.
108  */
109 bool ghes_disable;
110 module_param_named(disable, ghes_disable, bool, 0);
111 
112 /*
113  * "ghes.edac_force_enable" forcibly enables ghes_edac and skips the platform
114  * check.
115  */
116 static bool ghes_edac_force_enable;
117 module_param_named(edac_force_enable, ghes_edac_force_enable, bool, 0);
118 
119 /*
120  * All error sources notified with HED (Hardware Error Device) share a
121  * single notifier callback, so they need to be linked and checked one
122  * by one. This holds true for NMI too.
123  *
124  * RCU is used for these lists, so ghes_list_mutex is needed only when
125  * changing the lists, not when traversing them.
126  */
127 static LIST_HEAD(ghes_hed);
128 static DEFINE_MUTEX(ghes_list_mutex);
129 
130 /*
131  * A list of GHES devices which are given to the corresponding EDAC driver
132  * ghes_edac for further use.
133  */
134 static LIST_HEAD(ghes_devs);
135 static DEFINE_MUTEX(ghes_devs_mutex);
136 
137 /*
138  * The memory area used to transfer hardware error information from
139  * BIOS to Linux can be determined only in an NMI, IRQ or timer
140  * handler, and a general ioremap cannot be used in atomic context,
141  * so the fixmap is used instead.
142  *
143  * This spinlock prevents the fixmap entry from being used
144  * concurrently.
145  */
146 static DEFINE_SPINLOCK(ghes_notify_lock_irq);
147 
148 struct ghes_vendor_record_entry {
149 	struct work_struct work;
150 	int error_severity;
151 	char vendor_record[];
152 };
153 
154 static struct gen_pool *ghes_estatus_pool;
155 
156 static struct ghes_estatus_cache __rcu *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
157 static atomic_t ghes_estatus_cache_alloced;
158 
159 static int ghes_panic_timeout __read_mostly = 30;
160 
161 static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
162 {
163 	phys_addr_t paddr;
164 	pgprot_t prot;
165 
166 	paddr = PFN_PHYS(pfn);
167 	prot = arch_apei_get_mem_attribute(paddr);
168 	__set_fixmap(fixmap_idx, paddr, prot);
169 
170 	return (void __iomem *) __fix_to_virt(fixmap_idx);
171 }
172 
173 static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
174 {
175 	int _idx = virt_to_fix((unsigned long)vaddr);
176 
177 	WARN_ON_ONCE(fixmap_idx != _idx);
178 	clear_fixmap(fixmap_idx);
179 }
180 
181 int ghes_estatus_pool_init(unsigned int num_ghes)
182 {
183 	unsigned long addr, len;
184 	int rc;
185 
186 	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
187 	if (!ghes_estatus_pool)
188 		return -ENOMEM;
189 
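	/*
	 * Size the pool for the estatus cache plus a worst-case error
	 * status block per error source; NMI-like handlers allocate the
	 * records they queue from this pool.
	 */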
190 	len = GHES_ESTATUS_CACHE_AVG_SIZE * GHES_ESTATUS_CACHE_ALLOCED_MAX;
191 	len += (num_ghes * GHES_ESOURCE_PREALLOC_MAX_SIZE);
192 
193 	addr = (unsigned long)vmalloc(PAGE_ALIGN(len));
194 	if (!addr)
195 		goto err_pool_alloc;
196 
197 	rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
198 	if (rc)
199 		goto err_pool_add;
200 
201 	return 0;
202 
203 err_pool_add:
204 	vfree((void *)addr);
205 
206 err_pool_alloc:
207 	gen_pool_destroy(ghes_estatus_pool);
208 
209 	return -ENOMEM;
210 }
211 
212 /**
213  * ghes_estatus_pool_region_free - free previously allocated memory
214  *				   from the ghes_estatus_pool.
215  * @addr: address of memory to free.
216  * @size: size of memory to free.
219  */
220 void ghes_estatus_pool_region_free(unsigned long addr, u32 size)
221 {
222 	gen_pool_free(ghes_estatus_pool, addr, size);
223 }
224 EXPORT_SYMBOL_GPL(ghes_estatus_pool_region_free);
225 
226 static int map_gen_v2(struct ghes *ghes)
227 {
228 	return apei_map_generic_address(&ghes->generic_v2->read_ack_register);
229 }
230 
231 static void unmap_gen_v2(struct ghes *ghes)
232 {
233 	apei_unmap_generic_address(&ghes->generic_v2->read_ack_register);
234 }
235 
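/*
 * GHESv2 sources require an explicit read-ack handshake: after consuming
 * the error status block, the OS reads the read-ack register, applies
 * the preserve and write masks at the register's bit offset, and writes
 * the result back so that the firmware may reuse the block.
 */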
236 static void ghes_ack_error(struct acpi_hest_generic_v2 *gv2)
237 {
238 	int rc;
239 	u64 val = 0;
240 
241 	rc = apei_read(&val, &gv2->read_ack_register);
242 	if (rc)
243 		return;
244 
245 	val &= gv2->read_ack_preserve << gv2->read_ack_register.bit_offset;
246 	val |= gv2->read_ack_write    << gv2->read_ack_register.bit_offset;
247 
248 	apei_write(val, &gv2->read_ack_register);
249 }
250 
251 static struct ghes *ghes_new(struct acpi_hest_generic *generic)
252 {
253 	struct ghes *ghes;
254 	unsigned int error_block_length;
255 	int rc;
256 
257 	ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
258 	if (!ghes)
259 		return ERR_PTR(-ENOMEM);
260 
261 	ghes->generic = generic;
262 	if (is_hest_type_generic_v2(ghes)) {
263 		rc = map_gen_v2(ghes);
264 		if (rc)
265 			goto err_free;
266 	}
267 
268 	rc = apei_map_generic_address(&generic->error_status_address);
269 	if (rc)
270 		goto err_unmap_read_ack_addr;
271 	error_block_length = generic->error_block_length;
272 	if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
273 		pr_warn(FW_WARN GHES_PFX
274 			"Error status block length is too long: %u for "
275 			"generic hardware error source: %d.\n",
276 			error_block_length, generic->header.source_id);
277 		error_block_length = GHES_ESTATUS_MAX_SIZE;
278 	}
279 	ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
280 	if (!ghes->estatus) {
281 		rc = -ENOMEM;
282 		goto err_unmap_status_addr;
283 	}
284 
285 	return ghes;
286 
287 err_unmap_status_addr:
288 	apei_unmap_generic_address(&generic->error_status_address);
289 err_unmap_read_ack_addr:
290 	if (is_hest_type_generic_v2(ghes))
291 		unmap_gen_v2(ghes);
292 err_free:
293 	kfree(ghes);
294 	return ERR_PTR(rc);
295 }
296 
297 static void ghes_fini(struct ghes *ghes)
298 {
299 	kfree(ghes->estatus);
300 	apei_unmap_generic_address(&ghes->generic->error_status_address);
301 	if (is_hest_type_generic_v2(ghes))
302 		unmap_gen_v2(ghes);
303 }
304 
305 static inline int ghes_severity(int severity)
306 {
307 	switch (severity) {
308 	case CPER_SEV_INFORMATIONAL:
309 		return GHES_SEV_NO;
310 	case CPER_SEV_CORRECTED:
311 		return GHES_SEV_CORRECTED;
312 	case CPER_SEV_RECOVERABLE:
313 		return GHES_SEV_RECOVERABLE;
314 	case CPER_SEV_FATAL:
315 		return GHES_SEV_PANIC;
316 	default:
317 		/* Unknown severity: play it safe and panic */
318 		return GHES_SEV_PANIC;
319 	}
320 }
321 
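/*
 * Copy between a kernel buffer and physical memory at most one page per
 * iteration: map the page containing paddr through the per-context
 * fixmap slot, copy the chunk that fits within it, then unmap it again.
 */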
322 static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
323 				  int from_phys,
324 				  enum fixed_addresses fixmap_idx)
325 {
326 	void __iomem *vaddr;
327 	u64 offset;
328 	u32 trunk;
329 
330 	while (len > 0) {
331 		offset = paddr - (paddr & PAGE_MASK);
332 		vaddr = ghes_map(PHYS_PFN(paddr), fixmap_idx);
333 		trunk = PAGE_SIZE - offset;
334 		trunk = min(trunk, len);
335 		if (from_phys)
336 			memcpy_fromio(buffer, vaddr + offset, trunk);
337 		else
338 			memcpy_toio(vaddr + offset, buffer, trunk);
339 		len -= trunk;
340 		paddr += trunk;
341 		buffer += trunk;
342 		ghes_unmap(vaddr, fixmap_idx);
343 	}
344 }
345 
346 /* Check that the top-level record header has an appropriate size. */
347 static int __ghes_check_estatus(struct ghes *ghes,
348 				struct acpi_hest_generic_status *estatus)
349 {
350 	u32 len = cper_estatus_len(estatus);
351 
352 	if (len < sizeof(*estatus)) {
353 		pr_warn_ratelimited(FW_WARN GHES_PFX "Truncated error status block!\n");
354 		return -EIO;
355 	}
356 
357 	if (len > ghes->generic->error_block_length) {
358 		pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid error status block length!\n");
359 		return -EIO;
360 	}
361 
362 	if (cper_estatus_check_header(estatus)) {
363 		pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid CPER header!\n");
364 		return -EIO;
365 	}
366 
367 	return 0;
368 }
369 
370 /* Read the CPER block's address into buf_paddr and its header into estatus. */
371 static int __ghes_peek_estatus(struct ghes *ghes,
372 			       struct acpi_hest_generic_status *estatus,
373 			       u64 *buf_paddr, enum fixed_addresses fixmap_idx)
374 {
375 	struct acpi_hest_generic *g = ghes->generic;
376 	int rc;
377 
378 	rc = apei_read(buf_paddr, &g->error_status_address);
379 	if (rc) {
380 		*buf_paddr = 0;
381 		pr_warn_ratelimited(FW_WARN GHES_PFX
382 "Failed to read error status block address for hardware error source: %d.\n",
383 				   g->header.source_id);
384 		return -EIO;
385 	}
386 	if (!*buf_paddr)
387 		return -ENOENT;
388 
389 	ghes_copy_tofrom_phys(estatus, *buf_paddr, sizeof(*estatus), 1,
390 			      fixmap_idx);
391 	if (!estatus->block_status) {
392 		*buf_paddr = 0;
393 		return -ENOENT;
394 	}
395 
396 	return 0;
397 }
398 
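/* Copy the full estatus record from buf_paddr and validate the result. */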
399 static int __ghes_read_estatus(struct acpi_hest_generic_status *estatus,
400 			       u64 buf_paddr, enum fixed_addresses fixmap_idx,
401 			       size_t buf_len)
402 {
403 	ghes_copy_tofrom_phys(estatus, buf_paddr, buf_len, 1, fixmap_idx);
404 	if (cper_estatus_check(estatus)) {
405 		pr_warn_ratelimited(FW_WARN GHES_PFX
406 				    "Failed to read error status block!\n");
407 		return -EIO;
408 	}
409 
410 	return 0;
411 }
412 
413 static int ghes_read_estatus(struct ghes *ghes,
414 			     struct acpi_hest_generic_status *estatus,
415 			     u64 *buf_paddr, enum fixed_addresses fixmap_idx)
416 {
417 	int rc;
418 
419 	rc = __ghes_peek_estatus(ghes, estatus, buf_paddr, fixmap_idx);
420 	if (rc)
421 		return rc;
422 
423 	rc = __ghes_check_estatus(ghes, estatus);
424 	if (rc)
425 		return rc;
426 
427 	return __ghes_read_estatus(estatus, *buf_paddr, fixmap_idx,
428 				   cper_estatus_len(estatus));
429 }
430 
431 static void ghes_clear_estatus(struct ghes *ghes,
432 			       struct acpi_hest_generic_status *estatus,
433 			       u64 buf_paddr, enum fixed_addresses fixmap_idx)
434 {
435 	estatus->block_status = 0;
436 
437 	if (!buf_paddr)
438 		return;
439 
440 	ghes_copy_tofrom_phys(estatus, buf_paddr,
441 			      sizeof(estatus->block_status), 0,
442 			      fixmap_idx);
443 
444 	/*
445 	 * GHESv2 type HEST entries introduce support for error acknowledgment,
446 	 * so only acknowledge the error if this support is present.
447 	 */
448 	if (is_hest_type_generic_v2(ghes))
449 		ghes_ack_error(ghes->generic_v2);
450 }
451 
452 /*
453  * Called as task_work before returning to user-space.
454  * Ensure any queued work has been done before we return to the context that
455  * triggered the notification.
456  */
457 static void ghes_kick_task_work(struct callback_head *head)
458 {
459 	struct acpi_hest_generic_status *estatus;
460 	struct ghes_estatus_node *estatus_node;
461 	u32 node_len;
462 
463 	estatus_node = container_of(head, struct ghes_estatus_node, task_work);
464 	if (IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
465 		memory_failure_queue_kick(estatus_node->task_work_cpu);
466 
467 	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
468 	node_len = GHES_ESTATUS_NODE_LEN(cper_estatus_len(estatus));
469 	gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
470 }
471 
472 static bool ghes_do_memory_failure(u64 physical_addr, int flags)
473 {
474 	unsigned long pfn;
475 
476 	if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
477 		return false;
478 
479 	pfn = PHYS_PFN(physical_addr);
480 	if (!pfn_valid(pfn) && !arch_is_platform_page(physical_addr)) {
481 		pr_warn_ratelimited(FW_WARN GHES_PFX
482 		"Invalid address in generic error data: %#llx\n",
483 		physical_addr);
484 		return false;
485 	}
486 
487 	memory_failure_queue(pfn, flags);
488 	return true;
489 }
490 
491 static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
492 				       int sev)
493 {
494 	int flags = -1;
495 	int sec_sev = ghes_severity(gdata->error_severity);
496 	struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
497 
498 	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
499 		return false;
500 
501 	/* Only the following two conditions can be handled properly for now: */
502 	if (sec_sev == GHES_SEV_CORRECTED &&
503 	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
504 		flags = MF_SOFT_OFFLINE;
505 	if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
506 		flags = 0;
507 
508 	if (flags != -1)
509 		return ghes_do_memory_failure(mem_err->physical_addr, flags);
510 
511 	return false;
512 }
513 
514 static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata, int sev)
515 {
516 	struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
517 	bool queued = false;
518 	int sec_sev, i;
519 	char *p;
520 
521 	log_arm_hw_error(err);
522 
523 	sec_sev = ghes_severity(gdata->error_severity);
524 	if (sev != GHES_SEV_RECOVERABLE || sec_sev != GHES_SEV_RECOVERABLE)
525 		return false;
526 
527 	p = (char *)(err + 1);
528 	for (i = 0; i < err->err_info_num; i++) {
529 		struct cper_arm_err_info *err_info = (struct cper_arm_err_info *)p;
530 		bool is_cache = (err_info->type == CPER_ARM_CACHE_ERROR);
531 		bool has_pa = (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR);
532 		const char *error_type = "unknown error";
533 
534 		/*
535 		 * The field (err_info->error_info & BIT(26)) is always set to
536 		 * 1 by some old HiSilicon Kunpeng920 firmware. Assume that the
537 		 * firmware won't mix corrected errors into an uncorrected
538 		 * section, and don't filter out 'corrected' errors here.
539 		 */
540 		if (is_cache && has_pa) {
541 			queued = ghes_do_memory_failure(err_info->physical_fault_addr, 0);
542 			p += err_info->length;
543 			continue;
544 		}
545 
546 		if (err_info->type < ARRAY_SIZE(cper_proc_error_type_strs))
547 			error_type = cper_proc_error_type_strs[err_info->type];
548 
549 		pr_warn_ratelimited(FW_WARN GHES_PFX
550 				    "Unhandled processor error type: %s\n",
551 				    error_type);
552 		p += err_info->length;
553 	}
554 
555 	return queued;
556 }
557 
558 /*
559  * PCIe AER errors need to be sent to the AER driver for reporting and
560  * recovery. The GHES severities map to the following AER severities and
561  * require the following handling:
562  *
563  * GHES_SEV_CORRECTED -> AER_CORRECTABLE
564  *     These need to be reported by the AER driver but no recovery is
565  *     necessary.
566  * GHES_SEV_RECOVERABLE -> AER_NONFATAL
567  * GHES_SEV_RECOVERABLE && CPER_SEC_RESET -> AER_FATAL
568  *     These both need to be reported and recovered from by the AER driver.
569  * GHES_SEV_PANIC does not make it to this handling since the kernel must
570  *     panic.
571  */
572 static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
573 {
574 #ifdef CONFIG_ACPI_APEI_PCIEAER
575 	struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
576 
577 	if (pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
578 	    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
579 		unsigned int devfn;
580 		int aer_severity;
581 		u8 *aer_info;
582 
583 		devfn = PCI_DEVFN(pcie_err->device_id.device,
584 				  pcie_err->device_id.function);
585 		aer_severity = cper_severity_to_aer(gdata->error_severity);
586 
587 		/*
588 		 * If firmware reset the component to contain
589 		 * the error, we must reinitialize it before
590 		 * use, so treat it as a fatal AER error.
591 		 */
592 		if (gdata->flags & CPER_SEC_RESET)
593 			aer_severity = AER_FATAL;
594 
595 		aer_info = (void *)gen_pool_alloc(ghes_estatus_pool,
596 						  sizeof(struct aer_capability_regs));
597 		if (!aer_info)
598 			return;
599 		memcpy(aer_info, pcie_err->aer_info, sizeof(struct aer_capability_regs));
600 
601 		aer_recover_queue(pcie_err->device_id.segment,
602 				  pcie_err->device_id.bus,
603 				  devfn, aer_severity,
604 				  (struct aer_capability_regs *)
605 				  aer_info);
606 	}
607 #endif
608 }
609 
610 static BLOCKING_NOTIFIER_HEAD(vendor_record_notify_list);
611 
612 int ghes_register_vendor_record_notifier(struct notifier_block *nb)
613 {
614 	return blocking_notifier_chain_register(&vendor_record_notify_list, nb);
615 }
616 EXPORT_SYMBOL_GPL(ghes_register_vendor_record_notifier);
617 
618 void ghes_unregister_vendor_record_notifier(struct notifier_block *nb)
619 {
620 	blocking_notifier_chain_unregister(&vendor_record_notify_list, nb);
621 }
622 EXPORT_SYMBOL_GPL(ghes_unregister_vendor_record_notifier);
623 
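/*
 * A minimal sketch of a (hypothetical) consumer: a vendor driver could
 * subscribe to non-standard error records like this. The notifier runs
 * in process context with the GHES severity and the
 * struct acpi_hest_generic_data entry:
 *
 *	static int my_vendor_notify(struct notifier_block *nb,
 *				    unsigned long severity, void *data)
 *	{
 *		struct acpi_hest_generic_data *gdata = data;
 *
 *		... inspect gdata->section_type and its payload ...
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_vendor_nb = {
 *		.notifier_call = my_vendor_notify,
 *	};
 *
 *	ghes_register_vendor_record_notifier(&my_vendor_nb);
 */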
624 static void ghes_vendor_record_work_func(struct work_struct *work)
625 {
626 	struct ghes_vendor_record_entry *entry;
627 	struct acpi_hest_generic_data *gdata;
628 	u32 len;
629 
630 	entry = container_of(work, struct ghes_vendor_record_entry, work);
631 	gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);
632 
633 	blocking_notifier_call_chain(&vendor_record_notify_list,
634 				     entry->error_severity, gdata);
635 
636 	len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
637 	gen_pool_free(ghes_estatus_pool, (unsigned long)entry, len);
638 }
639 
640 static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
641 					  int sev)
642 {
643 	struct acpi_hest_generic_data *copied_gdata;
644 	struct ghes_vendor_record_entry *entry;
645 	u32 len;
646 
647 	len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
648 	entry = (void *)gen_pool_alloc(ghes_estatus_pool, len);
649 	if (!entry)
650 		return;
651 
652 	copied_gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);
653 	memcpy(copied_gdata, gdata, acpi_hest_get_record_size(gdata));
654 	entry->error_severity = sev;
655 
656 	INIT_WORK(&entry->work, ghes_vendor_record_work_func);
657 	schedule_work(&entry->work);
658 }
659 
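/*
 * Walk every generic data section of an estatus record and dispatch it
 * to the matching handler (memory failure, PCIe AER, ARM processor, or
 * deferred vendor records). Returns true if memory-failure recovery
 * work was queued, so that the caller can flush it via task_work before
 * returning to user-space.
 */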
660 static bool ghes_do_proc(struct ghes *ghes,
661 			 const struct acpi_hest_generic_status *estatus)
662 {
663 	int sev, sec_sev;
664 	struct acpi_hest_generic_data *gdata;
665 	guid_t *sec_type;
666 	const guid_t *fru_id = &guid_null;
667 	char *fru_text = "";
668 	bool queued = false;
669 
670 	sev = ghes_severity(estatus->error_severity);
671 	apei_estatus_for_each_section(estatus, gdata) {
672 		sec_type = (guid_t *)gdata->section_type;
673 		sec_sev = ghes_severity(gdata->error_severity);
674 		if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
675 			fru_id = (guid_t *)gdata->fru_id;
676 
677 		if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
678 			fru_text = gdata->fru_text;
679 
680 		if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
681 			struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
682 
683 			atomic_notifier_call_chain(&ghes_report_chain, sev, mem_err);
684 
685 			arch_apei_report_mem_error(sev, mem_err);
686 			queued = ghes_handle_memory_failure(gdata, sev);
687 		} else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
688 			ghes_handle_aer(gdata);
689 		} else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
692 			queued = ghes_handle_arm_hw_error(gdata, sev);
693 		} else {
694 			void *err = acpi_hest_get_payload(gdata);
695 
696 			ghes_defer_non_standard_event(gdata, sev);
697 			log_non_standard_event(sec_type, fru_id, fru_text,
698 					       sec_sev, err,
699 					       gdata->error_data_length);
700 		}
701 	}
702 
703 	return queued;
704 }
705 
706 static void __ghes_print_estatus(const char *pfx,
707 				 const struct acpi_hest_generic *generic,
708 				 const struct acpi_hest_generic_status *estatus)
709 {
710 	static atomic_t seqno;
711 	unsigned int curr_seqno;
712 	char pfx_seq[64];
713 
714 	if (pfx == NULL) {
715 		if (ghes_severity(estatus->error_severity) <=
716 		    GHES_SEV_CORRECTED)
717 			pfx = KERN_WARNING;
718 		else
719 			pfx = KERN_ERR;
720 	}
721 	curr_seqno = atomic_inc_return(&seqno);
722 	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
723 	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
724 	       pfx_seq, generic->header.source_id);
725 	cper_estatus_print(pfx_seq, estatus);
726 }
727 
728 static int ghes_print_estatus(const char *pfx,
729 			      const struct acpi_hest_generic *generic,
730 			      const struct acpi_hest_generic_status *estatus)
731 {
732 	/* Not more than 2 messages every 5 seconds */
733 	static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
734 	static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
735 	struct ratelimit_state *ratelimit;
736 
737 	if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
738 		ratelimit = &ratelimit_corrected;
739 	else
740 		ratelimit = &ratelimit_uncorrected;
741 	if (__ratelimit(ratelimit)) {
742 		__ghes_print_estatus(pfx, generic, estatus);
743 		return 1;
744 	}
745 	return 0;
746 }
747 
748 /*
749  * GHES error status reporting throttle: report more kinds of errors
750  * instead of just the most frequently occurring ones.
751  */
752 static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
753 {
754 	u32 len;
755 	int i, cached = 0;
756 	unsigned long long now;
757 	struct ghes_estatus_cache *cache;
758 	struct acpi_hest_generic_status *cache_estatus;
759 
760 	len = cper_estatus_len(estatus);
761 	rcu_read_lock();
762 	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
763 		cache = rcu_dereference(ghes_estatus_caches[i]);
764 		if (cache == NULL)
765 			continue;
766 		if (len != cache->estatus_len)
767 			continue;
768 		cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
769 		if (memcmp(estatus, cache_estatus, len))
770 			continue;
771 		atomic_inc(&cache->count);
772 		now = sched_clock();
773 		if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
774 			cached = 1;
775 		break;
776 	}
777 	rcu_read_unlock();
778 	return cached;
779 }
780 
781 static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
782 	struct acpi_hest_generic *generic,
783 	struct acpi_hest_generic_status *estatus)
784 {
785 	int alloced;
786 	u32 len, cache_len;
787 	struct ghes_estatus_cache *cache;
788 	struct acpi_hest_generic_status *cache_estatus;
789 
790 	alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
791 	if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
792 		atomic_dec(&ghes_estatus_cache_alloced);
793 		return NULL;
794 	}
795 	len = cper_estatus_len(estatus);
796 	cache_len = GHES_ESTATUS_CACHE_LEN(len);
797 	cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
798 	if (!cache) {
799 		atomic_dec(&ghes_estatus_cache_alloced);
800 		return NULL;
801 	}
802 	cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
803 	memcpy(cache_estatus, estatus, len);
804 	cache->estatus_len = len;
805 	atomic_set(&cache->count, 0);
806 	cache->generic = generic;
807 	cache->time_in = sched_clock();
808 	return cache;
809 }
810 
811 static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
812 {
813 	struct ghes_estatus_cache *cache;
814 	u32 len;
815 
816 	cache = container_of(head, struct ghes_estatus_cache, rcu);
817 	len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
818 	len = GHES_ESTATUS_CACHE_LEN(len);
819 	gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
820 	atomic_dec(&ghes_estatus_cache_alloced);
821 }
822 
823 static void
824 ghes_estatus_cache_add(struct acpi_hest_generic *generic,
825 		       struct acpi_hest_generic_status *estatus)
826 {
827 	unsigned long long now, duration, period, max_period = 0;
828 	struct ghes_estatus_cache *cache, *new_cache;
829 	struct ghes_estatus_cache __rcu *victim;
830 	int i, slot = -1, count;
831 
832 	new_cache = ghes_estatus_cache_alloc(generic, estatus);
833 	if (!new_cache)
834 		return;
835 
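	/*
	 * Pick a victim slot: prefer an empty slot or an expired entry;
	 * otherwise evict the entry with the largest mean period between
	 * hits (duration / (count + 1)), i.e. the least frequently
	 * matched one.
	 */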
836 	rcu_read_lock();
837 	now = sched_clock();
838 	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
839 		cache = rcu_dereference(ghes_estatus_caches[i]);
840 		if (cache == NULL) {
841 			slot = i;
842 			break;
843 		}
844 		duration = now - cache->time_in;
845 		if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
846 			slot = i;
847 			break;
848 		}
849 		count = atomic_read(&cache->count);
850 		period = duration;
851 		do_div(period, (count + 1));
852 		if (period > max_period) {
853 			max_period = period;
854 			slot = i;
855 		}
856 	}
857 	rcu_read_unlock();
858 
859 	if (slot != -1) {
860 		/*
861 		 * Use release semantics to ensure that ghes_estatus_cached()
862 		 * running on another CPU will see the updated cache fields if
863 		 * it can see the new value of the pointer.
864 		 */
865 		victim = xchg_release(&ghes_estatus_caches[slot],
866 				      RCU_INITIALIZER(new_cache));
867 
868 		/*
869 		 * At this point, victim may point to a cached item different
870 		 * from the one based on which we selected the slot. Instead of
871 		 * going to the loop again to pick another slot, let's just
872 		 * drop the other item anyway: this may cause a false cache
873 		 * miss later on, but that won't cause any problems.
874 		 */
875 		if (victim)
876 			call_rcu(&unrcu_pointer(victim)->rcu,
877 				 ghes_estatus_cache_rcu_free);
878 	}
879 }
880 
881 static void __ghes_panic(struct ghes *ghes,
882 			 struct acpi_hest_generic_status *estatus,
883 			 u64 buf_paddr, enum fixed_addresses fixmap_idx)
884 {
885 	__ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);
886 
887 	ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
888 
889 	/* reboot to log the error! */
890 	if (!panic_timeout)
891 		panic_timeout = ghes_panic_timeout;
892 	panic("Fatal hardware error!");
893 }
894 
895 static int ghes_proc(struct ghes *ghes)
896 {
897 	struct acpi_hest_generic_status *estatus = ghes->estatus;
898 	u64 buf_paddr;
899 	int rc;
900 
901 	rc = ghes_read_estatus(ghes, estatus, &buf_paddr, FIX_APEI_GHES_IRQ);
902 	if (rc)
903 		goto out;
904 
905 	if (ghes_severity(estatus->error_severity) >= GHES_SEV_PANIC)
906 		__ghes_panic(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);
907 
908 	if (!ghes_estatus_cached(estatus)) {
909 		if (ghes_print_estatus(NULL, ghes->generic, estatus))
910 			ghes_estatus_cache_add(ghes->generic, estatus);
911 	}
912 	ghes_do_proc(ghes, estatus);
913 
914 out:
915 	ghes_clear_estatus(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);
916 
917 	return rc;
918 }
919 
920 static void ghes_add_timer(struct ghes *ghes)
921 {
922 	struct acpi_hest_generic *g = ghes->generic;
923 	unsigned long expire;
924 
925 	if (!g->notify.poll_interval) {
926 		pr_warn(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
927 			g->header.source_id);
928 		return;
929 	}
930 	expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
931 	ghes->timer.expires = round_jiffies_relative(expire);
932 	add_timer(&ghes->timer);
933 }
934 
935 static void ghes_poll_func(struct timer_list *t)
936 {
937 	struct ghes *ghes = from_timer(ghes, t, timer);
938 	unsigned long flags;
939 
940 	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
941 	ghes_proc(ghes);
942 	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
943 	if (!(ghes->flags & GHES_EXITING))
944 		ghes_add_timer(ghes);
945 }
946 
947 static irqreturn_t ghes_irq_func(int irq, void *data)
948 {
949 	struct ghes *ghes = data;
950 	unsigned long flags;
951 	int rc;
952 
953 	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
954 	rc = ghes_proc(ghes);
955 	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
956 	if (rc)
957 		return IRQ_NONE;
958 
959 	return IRQ_HANDLED;
960 }
961 
962 static int ghes_notify_hed(struct notifier_block *this, unsigned long event,
963 			   void *data)
964 {
965 	struct ghes *ghes;
966 	unsigned long flags;
967 	int ret = NOTIFY_DONE;
968 
969 	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
970 	rcu_read_lock();
971 	list_for_each_entry_rcu(ghes, &ghes_hed, list) {
972 		if (!ghes_proc(ghes))
973 			ret = NOTIFY_OK;
974 	}
975 	rcu_read_unlock();
976 	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
977 
978 	return ret;
979 }
980 
981 static struct notifier_block ghes_notifier_hed = {
982 	.notifier_call = ghes_notify_hed,
983 };
984 
985 /*
986  * Handlers for CPER records may not be NMI safe. For example,
987  * memory_failure_queue() takes spinlocks and calls schedule_work_on().
988  * In any NMI-like handler, memory from ghes_estatus_pool is used to
989  * save the estatus, which is then added to ghes_estatus_llist.
990  * irq_work_queue() causes ghes_proc_in_irq() to run in IRQ context,
991  * where each estatus on ghes_estatus_llist is processed.
992  *
993  * Memory from the ghes_estatus_pool is also used with the ghes_estatus_cache
994  * to suppress frequent messages.
995  */
996 static struct llist_head ghes_estatus_llist;
997 static struct irq_work ghes_proc_irq_work;
998 
999 static void ghes_proc_in_irq(struct irq_work *irq_work)
1000 {
1001 	struct llist_node *llnode, *next;
1002 	struct ghes_estatus_node *estatus_node;
1003 	struct acpi_hest_generic *generic;
1004 	struct acpi_hest_generic_status *estatus;
1005 	bool task_work_pending;
1006 	u32 len, node_len;
1007 	int ret;
1008 
1009 	llnode = llist_del_all(&ghes_estatus_llist);
1010 	/*
1011 	 * The llist holds the estatus entries in reverse time order;
1012 	 * restore the proper order before processing.
1013 	 */
1014 	llnode = llist_reverse_order(llnode);
1015 	while (llnode) {
1016 		next = llnode->next;
1017 		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
1018 					   llnode);
1019 		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
1020 		len = cper_estatus_len(estatus);
1021 		node_len = GHES_ESTATUS_NODE_LEN(len);
1022 		task_work_pending = ghes_do_proc(estatus_node->ghes, estatus);
1023 		if (!ghes_estatus_cached(estatus)) {
1024 			generic = estatus_node->generic;
1025 			if (ghes_print_estatus(NULL, generic, estatus))
1026 				ghes_estatus_cache_add(generic, estatus);
1027 		}
1028 
1029 		if (task_work_pending && current->mm) {
1030 			estatus_node->task_work.func = ghes_kick_task_work;
1031 			estatus_node->task_work_cpu = smp_processor_id();
1032 			ret = task_work_add(current, &estatus_node->task_work,
1033 					    TWA_RESUME);
1034 			if (ret)
1035 				estatus_node->task_work.func = NULL;
1036 		}
1037 
1038 		if (!estatus_node->task_work.func)
1039 			gen_pool_free(ghes_estatus_pool,
1040 				      (unsigned long)estatus_node, node_len);
1041 
1042 		llnode = next;
1043 	}
1044 }
1045 
1046 static void ghes_print_queued_estatus(void)
1047 {
1048 	struct llist_node *llnode;
1049 	struct ghes_estatus_node *estatus_node;
1050 	struct acpi_hest_generic *generic;
1051 	struct acpi_hest_generic_status *estatus;
1052 
1053 	llnode = llist_del_all(&ghes_estatus_llist);
1054 	/*
1055 	 * The llist holds the estatus entries in reverse time order;
1056 	 * restore the proper order before printing.
1057 	 */
1058 	llnode = llist_reverse_order(llnode);
1059 	while (llnode) {
1060 		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
1061 					   llnode);
1062 		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
1063 		generic = estatus_node->generic;
1064 		ghes_print_estatus(NULL, generic, estatus);
1065 		llnode = llnode->next;
1066 	}
1067 }
1068 
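/*
 * Read one error status block in NMI-like context and queue it on
 * ghes_estatus_llist for later processing; panic right here if the
 * record is fatal.
 */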
1069 static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
1070 				       enum fixed_addresses fixmap_idx)
1071 {
1072 	struct acpi_hest_generic_status *estatus, tmp_header;
1073 	struct ghes_estatus_node *estatus_node;
1074 	u32 len, node_len;
1075 	u64 buf_paddr;
1076 	int sev, rc;
1077 
1078 	if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG))
1079 		return -EOPNOTSUPP;
1080 
1081 	rc = __ghes_peek_estatus(ghes, &tmp_header, &buf_paddr, fixmap_idx);
1082 	if (rc) {
1083 		ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
1084 		return rc;
1085 	}
1086 
1087 	rc = __ghes_check_estatus(ghes, &tmp_header);
1088 	if (rc) {
1089 		ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
1090 		return rc;
1091 	}
1092 
1093 	len = cper_estatus_len(&tmp_header);
1094 	node_len = GHES_ESTATUS_NODE_LEN(len);
1095 	estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len);
1096 	if (!estatus_node)
1097 		return -ENOMEM;
1098 
1099 	estatus_node->ghes = ghes;
1100 	estatus_node->generic = ghes->generic;
1101 	estatus_node->task_work.func = NULL;
1102 	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
1103 
1104 	if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) {
1105 		ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
1106 		rc = -ENOENT;
1107 		goto no_work;
1108 	}
1109 
1110 	sev = ghes_severity(estatus->error_severity);
1111 	if (sev >= GHES_SEV_PANIC) {
1112 		ghes_print_queued_estatus();
1113 		__ghes_panic(ghes, estatus, buf_paddr, fixmap_idx);
1114 	}
1115 
1116 	ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
1117 
1118 	/* This error has been reported before; don't process it again. */
1119 	if (ghes_estatus_cached(estatus))
1120 		goto no_work;
1121 
1122 	llist_add(&estatus_node->llnode, &ghes_estatus_llist);
1123 
1124 	return rc;
1125 
1126 no_work:
1127 	gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
1128 		      node_len);
1129 
1130 	return rc;
1131 }
1132 
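/*
 * Spool any pending records from every source on @rcu_list into
 * ghes_estatus_llist; if a record was consumed, kick ghes_proc_irq_work
 * so that the queue is processed in IRQ context.
 */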
1133 static int ghes_in_nmi_spool_from_list(struct list_head *rcu_list,
1134 				       enum fixed_addresses fixmap_idx)
1135 {
1136 	int ret = -ENOENT;
1137 	struct ghes *ghes;
1138 
1139 	rcu_read_lock();
1140 	list_for_each_entry_rcu(ghes, rcu_list, list) {
1141 		if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx))
1142 			ret = 0;
1143 	}
1144 	rcu_read_unlock();
1145 
1146 	if (IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) && !ret)
1147 		irq_work_queue(&ghes_proc_irq_work);
1148 
1149 	return ret;
1150 }
1151 
1152 #ifdef CONFIG_ACPI_APEI_SEA
1153 static LIST_HEAD(ghes_sea);
1154 
1155 /*
1156  * Return 0 only if one of the SEA error sources successfully reported an error
1157  * record sent from the firmware.
1158  */
1159 int ghes_notify_sea(void)
1160 {
1161 	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sea);
1162 	int rv;
1163 
1164 	raw_spin_lock(&ghes_notify_lock_sea);
1165 	rv = ghes_in_nmi_spool_from_list(&ghes_sea, FIX_APEI_GHES_SEA);
1166 	raw_spin_unlock(&ghes_notify_lock_sea);
1167 
1168 	return rv;
1169 }
1170 
1171 static void ghes_sea_add(struct ghes *ghes)
1172 {
1173 	mutex_lock(&ghes_list_mutex);
1174 	list_add_rcu(&ghes->list, &ghes_sea);
1175 	mutex_unlock(&ghes_list_mutex);
1176 }
1177 
1178 static void ghes_sea_remove(struct ghes *ghes)
1179 {
1180 	mutex_lock(&ghes_list_mutex);
1181 	list_del_rcu(&ghes->list);
1182 	mutex_unlock(&ghes_list_mutex);
1183 	synchronize_rcu();
1184 }
1185 #else /* CONFIG_ACPI_APEI_SEA */
1186 static inline void ghes_sea_add(struct ghes *ghes) { }
1187 static inline void ghes_sea_remove(struct ghes *ghes) { }
1188 #endif /* CONFIG_ACPI_APEI_SEA */
1189 
1190 #ifdef CONFIG_HAVE_ACPI_APEI_NMI
1191 /*
1192  * An NMI may be triggered on any CPU, so ghes_in_nmi is used to allow
1193  * only one concurrent reader.
1194  */
1195 static atomic_t ghes_in_nmi = ATOMIC_INIT(0);
1196 
1197 static LIST_HEAD(ghes_nmi);
1198 
1199 static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
1200 {
1201 	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_nmi);
1202 	int ret = NMI_DONE;
1203 
1204 	if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
1205 		return ret;
1206 
1207 	raw_spin_lock(&ghes_notify_lock_nmi);
1208 	if (!ghes_in_nmi_spool_from_list(&ghes_nmi, FIX_APEI_GHES_NMI))
1209 		ret = NMI_HANDLED;
1210 	raw_spin_unlock(&ghes_notify_lock_nmi);
1211 
1212 	atomic_dec(&ghes_in_nmi);
1213 	return ret;
1214 }
1215 
1216 static void ghes_nmi_add(struct ghes *ghes)
1217 {
1218 	mutex_lock(&ghes_list_mutex);
1219 	if (list_empty(&ghes_nmi))
1220 		register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, "ghes");
1221 	list_add_rcu(&ghes->list, &ghes_nmi);
1222 	mutex_unlock(&ghes_list_mutex);
1223 }
1224 
1225 static void ghes_nmi_remove(struct ghes *ghes)
1226 {
1227 	mutex_lock(&ghes_list_mutex);
1228 	list_del_rcu(&ghes->list);
1229 	if (list_empty(&ghes_nmi))
1230 		unregister_nmi_handler(NMI_LOCAL, "ghes");
1231 	mutex_unlock(&ghes_list_mutex);
1232 	/*
1233 	 * To synchronize with the NMI handler, the ghes can only be
1234 	 * freed after the NMI handler has finished.
1235 	 */
1236 	synchronize_rcu();
1237 }
1238 #else /* CONFIG_HAVE_ACPI_APEI_NMI */
1239 static inline void ghes_nmi_add(struct ghes *ghes) { }
1240 static inline void ghes_nmi_remove(struct ghes *ghes) { }
1241 #endif /* CONFIG_HAVE_ACPI_APEI_NMI */
1242 
1243 static void ghes_nmi_init_cxt(void)
1244 {
1245 	init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
1246 }
1247 
1248 static int __ghes_sdei_callback(struct ghes *ghes,
1249 				enum fixed_addresses fixmap_idx)
1250 {
1251 	if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx)) {
1252 		irq_work_queue(&ghes_proc_irq_work);
1253 
1254 		return 0;
1255 	}
1256 
1257 	return -ENOENT;
1258 }
1259 
1260 static int ghes_sdei_normal_callback(u32 event_num, struct pt_regs *regs,
1261 				      void *arg)
1262 {
1263 	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_normal);
1264 	struct ghes *ghes = arg;
1265 	int err;
1266 
1267 	raw_spin_lock(&ghes_notify_lock_sdei_normal);
1268 	err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_NORMAL);
1269 	raw_spin_unlock(&ghes_notify_lock_sdei_normal);
1270 
1271 	return err;
1272 }
1273 
1274 static int ghes_sdei_critical_callback(u32 event_num, struct pt_regs *regs,
1275 				       void *arg)
1276 {
1277 	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_critical);
1278 	struct ghes *ghes = arg;
1279 	int err;
1280 
1281 	raw_spin_lock(&ghes_notify_lock_sdei_critical);
1282 	err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_CRITICAL);
1283 	raw_spin_unlock(&ghes_notify_lock_sdei_critical);
1284 
1285 	return err;
1286 }
1287 
1288 static int apei_sdei_register_ghes(struct ghes *ghes)
1289 {
1290 	if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
1291 		return -EOPNOTSUPP;
1292 
1293 	return sdei_register_ghes(ghes, ghes_sdei_normal_callback,
1294 				 ghes_sdei_critical_callback);
1295 }
1296 
1297 static int apei_sdei_unregister_ghes(struct ghes *ghes)
1298 {
1299 	if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
1300 		return -EOPNOTSUPP;
1301 
1302 	return sdei_unregister_ghes(ghes);
1303 }
1304 
1305 static int ghes_probe(struct platform_device *ghes_dev)
1306 {
1307 	struct acpi_hest_generic *generic;
1308 	struct ghes *ghes = NULL;
1309 	unsigned long flags;
1310 
1311 	int rc = -EINVAL;
1312 
1313 	generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
1314 	if (!generic->enabled)
1315 		return -ENODEV;
1316 
1317 	switch (generic->notify.type) {
1318 	case ACPI_HEST_NOTIFY_POLLED:
1319 	case ACPI_HEST_NOTIFY_EXTERNAL:
1320 	case ACPI_HEST_NOTIFY_SCI:
1321 	case ACPI_HEST_NOTIFY_GSIV:
1322 	case ACPI_HEST_NOTIFY_GPIO:
1323 		break;
1324 
1325 	case ACPI_HEST_NOTIFY_SEA:
1326 		if (!IS_ENABLED(CONFIG_ACPI_APEI_SEA)) {
1327 			pr_warn(GHES_PFX "Generic hardware error source: %d notified via SEA is not supported\n",
1328 				generic->header.source_id);
1329 			rc = -ENOTSUPP;
1330 			goto err;
1331 		}
1332 		break;
1333 	case ACPI_HEST_NOTIFY_NMI:
1334 		if (!IS_ENABLED(CONFIG_HAVE_ACPI_APEI_NMI)) {
1335 			pr_warn(GHES_PFX "Generic hardware error source: %d notified via NMI interrupt is not supported!\n",
1336 				generic->header.source_id);
1337 			goto err;
1338 		}
1339 		break;
1340 	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
1341 		if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) {
1342 			pr_warn(GHES_PFX "Generic hardware error source: %d notified via SDE Interface is not supported!\n",
1343 				generic->header.source_id);
1344 			goto err;
1345 		}
1346 		break;
1347 	case ACPI_HEST_NOTIFY_LOCAL:
1348 		pr_warn(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
1349 			generic->header.source_id);
1350 		goto err;
1351 	default:
1352 		pr_warn(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
1353 			generic->notify.type, generic->header.source_id);
1354 		goto err;
1355 	}
1356 
1357 	rc = -EIO;
1358 	if (generic->error_block_length <
1359 	    sizeof(struct acpi_hest_generic_status)) {
1360 		pr_warn(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
1361 			generic->error_block_length, generic->header.source_id);
1362 		goto err;
1363 	}
1364 	ghes = ghes_new(generic);
1365 	if (IS_ERR(ghes)) {
1366 		rc = PTR_ERR(ghes);
1367 		ghes = NULL;
1368 		goto err;
1369 	}
1370 
1371 	switch (generic->notify.type) {
1372 	case ACPI_HEST_NOTIFY_POLLED:
1373 		timer_setup(&ghes->timer, ghes_poll_func, 0);
1374 		ghes_add_timer(ghes);
1375 		break;
1376 	case ACPI_HEST_NOTIFY_EXTERNAL:
1377 		/* External interrupt vector is GSI */
1378 		rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
1379 		if (rc) {
1380 			pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
1381 			       generic->header.source_id);
1382 			goto err;
1383 		}
1384 		rc = request_irq(ghes->irq, ghes_irq_func, IRQF_SHARED,
1385 				 "GHES IRQ", ghes);
1386 		if (rc) {
1387 			pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
1388 			       generic->header.source_id);
1389 			goto err;
1390 		}
1391 		break;
1392 
1393 	case ACPI_HEST_NOTIFY_SCI:
1394 	case ACPI_HEST_NOTIFY_GSIV:
1395 	case ACPI_HEST_NOTIFY_GPIO:
1396 		mutex_lock(&ghes_list_mutex);
1397 		if (list_empty(&ghes_hed))
1398 			register_acpi_hed_notifier(&ghes_notifier_hed);
1399 		list_add_rcu(&ghes->list, &ghes_hed);
1400 		mutex_unlock(&ghes_list_mutex);
1401 		break;
1402 
1403 	case ACPI_HEST_NOTIFY_SEA:
1404 		ghes_sea_add(ghes);
1405 		break;
1406 	case ACPI_HEST_NOTIFY_NMI:
1407 		ghes_nmi_add(ghes);
1408 		break;
1409 	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
1410 		rc = apei_sdei_register_ghes(ghes);
1411 		if (rc)
1412 			goto err;
1413 		break;
1414 	default:
1415 		BUG();
1416 	}
1417 
1418 	platform_set_drvdata(ghes_dev, ghes);
1419 
1420 	ghes->dev = &ghes_dev->dev;
1421 
1422 	mutex_lock(&ghes_devs_mutex);
1423 	list_add_tail(&ghes->elist, &ghes_devs);
1424 	mutex_unlock(&ghes_devs_mutex);
1425 
1426 	/* Handle any pending errors right away */
1427 	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
1428 	ghes_proc(ghes);
1429 	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
1430 
1431 	return 0;
1432 
1433 err:
1434 	if (ghes) {
1435 		ghes_fini(ghes);
1436 		kfree(ghes);
1437 	}
1438 	return rc;
1439 }
1440 
1441 static int ghes_remove(struct platform_device *ghes_dev)
1442 {
1443 	int rc;
1444 	struct ghes *ghes;
1445 	struct acpi_hest_generic *generic;
1446 
1447 	ghes = platform_get_drvdata(ghes_dev);
1448 	generic = ghes->generic;
1449 
1450 	ghes->flags |= GHES_EXITING;
1451 	switch (generic->notify.type) {
1452 	case ACPI_HEST_NOTIFY_POLLED:
1453 		timer_shutdown_sync(&ghes->timer);
1454 		break;
1455 	case ACPI_HEST_NOTIFY_EXTERNAL:
1456 		free_irq(ghes->irq, ghes);
1457 		break;
1458 
1459 	case ACPI_HEST_NOTIFY_SCI:
1460 	case ACPI_HEST_NOTIFY_GSIV:
1461 	case ACPI_HEST_NOTIFY_GPIO:
1462 		mutex_lock(&ghes_list_mutex);
1463 		list_del_rcu(&ghes->list);
1464 		if (list_empty(&ghes_hed))
1465 			unregister_acpi_hed_notifier(&ghes_notifier_hed);
1466 		mutex_unlock(&ghes_list_mutex);
1467 		synchronize_rcu();
1468 		break;
1469 
1470 	case ACPI_HEST_NOTIFY_SEA:
1471 		ghes_sea_remove(ghes);
1472 		break;
1473 	case ACPI_HEST_NOTIFY_NMI:
1474 		ghes_nmi_remove(ghes);
1475 		break;
1476 	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
1477 		rc = apei_sdei_unregister_ghes(ghes);
1478 		if (rc)
1479 			return rc;
1480 		break;
1481 	default:
1482 		BUG();
1483 		break;
1484 	}
1485 
1486 	ghes_fini(ghes);
1487 
1488 	mutex_lock(&ghes_devs_mutex);
1489 	list_del(&ghes->elist);
1490 	mutex_unlock(&ghes_devs_mutex);
1491 
1492 	kfree(ghes);
1493 
1494 	return 0;
1495 }
1496 
1497 static struct platform_driver ghes_platform_driver = {
1498 	.driver		= {
1499 		.name	= "GHES",
1500 	},
1501 	.probe		= ghes_probe,
1502 	.remove		= ghes_remove,
1503 };
1504 
1505 void __init acpi_ghes_init(void)
1506 {
1507 	int rc;
1508 
1509 	sdei_init();
1510 
1511 	if (acpi_disabled)
1512 		return;
1513 
1514 	switch (hest_disable) {
1515 	case HEST_NOT_FOUND:
1516 		return;
1517 	case HEST_DISABLED:
1518 		pr_info(GHES_PFX "HEST is not enabled!\n");
1519 		return;
1520 	default:
1521 		break;
1522 	}
1523 
1524 	if (ghes_disable) {
1525 		pr_info(GHES_PFX "GHES is not enabled!\n");
1526 		return;
1527 	}
1528 
1529 	ghes_nmi_init_cxt();
1530 
1531 	rc = platform_driver_register(&ghes_platform_driver);
1532 	if (rc)
1533 		return;
1534 
1535 	rc = apei_osc_setup();
1536 	if (rc == 0 && osc_sb_apei_support_acked)
1537 		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
1538 	else if (rc == 0 && !osc_sb_apei_support_acked)
1539 		pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
1540 	else if (rc && osc_sb_apei_support_acked)
1541 		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
1542 	else
1543 		pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
1544 }
1545 
1546 /*
1547  * Known x86 systems that prefer GHES error reporting:
1548  */
1549 static struct acpi_platform_list plat_list[] = {
1550 	{"HPE   ", "Server  ", 0, ACPI_SIG_FADT, all_versions},
1551 	{ } /* End */
1552 };
1553 
1554 struct list_head *ghes_get_devices(void)
1555 {
1556 	int idx = -1;
1557 
1558 	if (IS_ENABLED(CONFIG_X86)) {
1559 		idx = acpi_match_platform_list(plat_list);
1560 		if (idx < 0) {
1561 			if (!ghes_edac_force_enable)
1562 				return NULL;
1563 
1564 			pr_warn_once("Force-loading ghes_edac on an unsupported platform. You're on your own!\n");
1565 		}
1566 	} else if (list_empty(&ghes_devs)) {
1567 		return NULL;
1568 	}
1569 
1570 	return &ghes_devs;
1571 }
1572 EXPORT_SYMBOL_GPL(ghes_get_devices);
1573 
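/*
 * Report-chain notifiers are invoked from ghes_do_proc() with the GHES
 * severity and a struct cper_sec_mem_err payload whenever a standard
 * platform memory error section is processed.
 */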
1574 void ghes_register_report_chain(struct notifier_block *nb)
1575 {
1576 	atomic_notifier_chain_register(&ghes_report_chain, nb);
1577 }
1578 EXPORT_SYMBOL_GPL(ghes_register_report_chain);
1579 
1580 void ghes_unregister_report_chain(struct notifier_block *nb)
1581 {
1582 	atomic_notifier_chain_unregister(&ghes_report_chain, nb);
1583 }
1584 EXPORT_SYMBOL_GPL(ghes_unregister_report_chain);
1585