// SPDX-License-Identifier: GPL-2.0-only
/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 * Author: Andi Kleen
 */

#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/ratelimit.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/ras.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/export.h>
#include <linux/set_memory.h>
#include <linux/sync_core.h>
#include <linux/task_work.h>
#include <linux/hardirq.h>
#include <linux/kexec.h>

#include <asm/fred.h>
#include <asm/cpu_device_id.h>
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/reboot.h>
#include <asm/tdx.h>

#include "internal.h"

/* sysfs synchronization */
static DEFINE_MUTEX(mce_sysfs_mutex);

#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>

#define SPINUNIT		100	/* 100ns */

DEFINE_PER_CPU(unsigned, mce_exception_count);

DEFINE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);

DEFINE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array);

#define ATTR_LEN               16
/* One object for each MCE bank, shared by all CPUs */
struct mce_bank_dev {
	struct device_attribute	attr;			/* device attribute */
	char			attrname[ATTR_LEN];	/* attribute name */
	u8			bank;			/* bank number */
};
static struct mce_bank_dev mce_bank_devs[MAX_NR_BANKS];

struct mce_vendor_flags mce_flags __read_mostly;

struct mca_config mca_cfg __read_mostly = {
	.bootlog  = -1,
	.monarch_timeout = -1
};

static DEFINE_PER_CPU(struct mce, mces_seen);
static unsigned long mce_need_notify;

/*
 * MCA banks polled by the periodic polling timer for corrected events.
 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
 */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

/*
 * MCA banks controlled through firmware first for corrected errors.
 * This is a global list of banks for which we won't enable CMCI and we
 * won't poll. Firmware controls these banks and is responsible for
 * reporting corrected errors through GHES. Uncorrected/recoverable
 * errors are still notified through a machine check.
 */
mce_banks_t mce_banks_ce_disabled;

static struct work_struct mce_work;
static struct irq_work mce_irq_work;

/*
 * CPU/chipset specific EDAC code can register a notifier call here to print
 * MCE errors in a human-readable form.
 */
BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);

/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));
	m->cpu = m->extcpu = smp_processor_id();
	/* need the internal __ version to avoid deadlocks */
	m->time = __ktime_get_real_seconds();
	m->cpuvendor = boot_cpu_data.x86_vendor;
	m->cpuid = cpuid_eax(1);
	m->socketid = cpu_data(m->extcpu).topo.pkg_id;
	m->apicid = cpu_data(m->extcpu).topo.initial_apicid;
	m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP);
	m->ppin = cpu_data(m->extcpu).ppin;
	m->microcode = boot_cpu_data.microcode;
}

DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);

void mce_log(struct mce *m)
{
	if (!mce_gen_pool_add(m))
		irq_work_queue(&mce_irq_work);
}
EXPORT_SYMBOL_GPL(mce_log);

void mce_register_decode_chain(struct notifier_block *nb)
{
	if (WARN_ON(nb->priority < MCE_PRIO_LOWEST ||
		    nb->priority > MCE_PRIO_HIGHEST))
		return;

	blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_register_decode_chain);

void mce_unregister_decode_chain(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);

static void __print_mce(struct mce *m)
{
	pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n",
		 m->extcpu,
		 (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""),
		 m->mcgstatus, m->bank, m->status);

	if (m->ip) {
		pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
			m->cs, m->ip);

		if (m->cs == __KERNEL_CS)
			pr_cont("{%pS}", (void *)(unsigned long)m->ip);
		pr_cont("\n");
	}

	pr_emerg(HW_ERR "TSC %llx ", m->tsc);
	if (m->addr)
		pr_cont("ADDR %llx ", m->addr);
	if (m->misc)
		pr_cont("MISC %llx ", m->misc);
	if (m->ppin)
		pr_cont("PPIN %llx ", m->ppin);

	if (mce_flags.smca) {
		if (m->synd)
			pr_cont("SYND %llx ", m->synd);
		if (m->ipid)
			pr_cont("IPID %llx ", m->ipid);
	}

	pr_cont("\n");

	/*
	 * Note this output is parsed by external tools and old fields
	 * should not be changed.
	 */
	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
		m->microcode);
}

static void print_mce(struct mce *m)
{
	__print_mce(m);

	if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON)
		pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}

#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_panicked;

static int fake_panic;
static atomic_t mce_fake_panicked;

/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;

	preempt_disable();
	local_irq_enable();
	while (timeout-- > 0)
		udelay(1);
	if (panic_timeout == 0)
		panic_timeout = mca_cfg.panic_timeout;
	panic("Panicking machine check CPU died");
}

static const char *mce_dump_aux_info(struct mce *m)
{
	if (boot_cpu_has_bug(X86_BUG_TDX_PW_MCE))
		return tdx_dump_mce_info(m);

	return NULL;
}

static noinstr void mce_panic(const char *msg, struct mce *final, char *exp)
{
	struct llist_node *pending;
	struct mce_evt_llist *l;
	int apei_err = 0;
	const char *memmsg;

	/*
	 * Allow instrumentation around external facilities usage. Not that it
	 * matters a whole lot since the machine is going to panic anyway.
	 */
	instrumentation_begin();

	if (!fake_panic) {
		/*
		 * Make sure only one CPU runs in machine check panic
		 */
		if (atomic_inc_return(&mce_panicked) > 1)
			wait_for_panic();
		barrier();

		bust_spinlocks(1);
		console_verbose();
	} else {
		/* Don't log too much for fake panic */
		if (atomic_inc_return(&mce_fake_panicked) > 1)
			goto out;
	}
	pending = mce_gen_pool_prepare_records();
	/* First print corrected ones that are still unlogged */
	llist_for_each_entry(l, pending, llnode) {
		struct mce *m = &l->mce;
		if (!(m->status & MCI_STATUS_UC)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	/* Now print uncorrected but with the final one last */
	llist_for_each_entry(l, pending, llnode) {
		struct mce *m = &l->mce;
		if (!(m->status & MCI_STATUS_UC))
			continue;
		if (!final || mce_cmp(m, final)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	if (final) {
		print_mce(final);
		if (!apei_err)
			apei_err = apei_write_mce(final);
	}
	if (exp)
		pr_emerg(HW_ERR "Machine check: %s\n", exp);

	memmsg = mce_dump_aux_info(final);
	if (memmsg)
		pr_emerg(HW_ERR "Machine check: %s\n", memmsg);

	if (!fake_panic) {
		if (panic_timeout == 0)
			panic_timeout = mca_cfg.panic_timeout;

		/*
		 * Kdump skips the poisoned page in order to avoid
		 * touching the error bits again. Poison the page even
		 * if the error is fatal and the machine is about to
		 * panic.
		 */
		if (kexec_crash_loaded()) {
			if (final && (final->status & MCI_STATUS_ADDRV)) {
				struct page *p;
				p = pfn_to_online_page(final->addr >> PAGE_SHIFT);
				if (p)
					SetPageHWPoison(p);
			}
		}
		panic(msg);
	} else
		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);

out:
	instrumentation_end();
}

/* Support code for software error injection */

static int msr_to_offset(u32 msr)
{
	unsigned bank = __this_cpu_read(injectm.bank);

	if (msr == mca_cfg.rip_msr)
		return offsetof(struct mce, ip);
	if (msr == mca_msr_reg(bank, MCA_STATUS))
		return offsetof(struct mce, status);
	if (msr == mca_msr_reg(bank, MCA_ADDR))
		return offsetof(struct mce, addr);
	if (msr == mca_msr_reg(bank, MCA_MISC))
		return offsetof(struct mce, misc);
	if (msr == MSR_IA32_MCG_STATUS)
		return offsetof(struct mce, mcgstatus);
	return -1;
}

void ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr)
{
	if (wrmsr) {
		pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
			 (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax,
			 regs->ip, (void *)regs->ip);
	} else {
		pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
			 (unsigned int)regs->cx, regs->ip, (void *)regs->ip);
	}

	show_stack_regs(regs);

	panic("MCA architectural violation!\n");

	while (true)
		cpu_relax();
}

/* MSR access wrappers used for error injection */
noinstr u64 mce_rdmsrl(u32 msr)
{
	DECLARE_ARGS(val, low, high);

	if (__this_cpu_read(injectm.finished)) {
		int offset;
		u64 ret;

		instrumentation_begin();

		offset = msr_to_offset(msr);
		if (offset < 0)
			ret = 0;
		else
			ret = *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);

		instrumentation_end();

		return ret;
	}

	/*
	 * RDMSR on MCA MSRs should not fault. If they do, this is very much an
	 * architectural violation and needs to be reported to the hw vendor.
	 * Panic the box to not allow any further progress.
	 */
	asm volatile("1: rdmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR_IN_MCE)
		     : EAX_EDX_RET(val, low, high) : "c" (msr));

	return EAX_EDX_VAL(val, low, high);
}

static noinstr void mce_wrmsrl(u32 msr, u64 v)
{
	u32 low, high;

	if (__this_cpu_read(injectm.finished)) {
		int offset;

		instrumentation_begin();

		offset = msr_to_offset(msr);
		if (offset >= 0)
			*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;

		instrumentation_end();

		return;
	}

	low  = (u32)v;
	high = (u32)(v >> 32);

	/* See comment in mce_rdmsrl() */
	asm volatile("1: wrmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR_IN_MCE)
		     : : "c" (msr), "a"(low), "d" (high) : "memory");
}

/*
 * Collect all global (w.r.t. this processor) status about this machine
 * check into our "mce" struct so that we can use it later to assess
 * the severity of the problem as we read per-bank specific details.
 */
static noinstr void mce_gather_info(struct mce *m, struct pt_regs *regs)
{
	/*
	 * Enable instrumentation around mce_setup() which calls external
	 * facilities.
	 */
	instrumentation_begin();
	mce_setup(m);
	instrumentation_end();

	m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	if (regs) {
		/*
		 * Get the address of the instruction at the time of
		 * the machine check error.
		 */
		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
			m->ip = regs->ip;
			m->cs = regs->cs;

			/*
			 * When in VM86 mode make the cs look like ring 3
			 * always. This is a lie, but it's better than passing
			 * the additional vm86 bit around everywhere.
			 */
			if (v8086_mode(regs))
				m->cs |= 3;
		}
		/* Use accurate RIP reporting if available. */
		if (mca_cfg.rip_msr)
			m->ip = mce_rdmsrl(mca_cfg.rip_msr);
	}
}

int mce_available(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return 0;
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static void mce_schedule_work(void)
{
	if (!mce_gen_pool_empty())
		schedule_work(&mce_work);
}

static void mce_irq_work_cb(struct irq_work *entry)
{
	mce_schedule_work();
}

bool mce_usable_address(struct mce *m)
{
	if (!(m->status & MCI_STATUS_ADDRV))
		return false;

	switch (m->cpuvendor) {
	case X86_VENDOR_AMD:
		return amd_mce_usable_address(m);

	case X86_VENDOR_INTEL:
	case X86_VENDOR_ZHAOXIN:
		return intel_mce_usable_address(m);

	default:
		return true;
	}
}
EXPORT_SYMBOL_GPL(mce_usable_address);

bool mce_is_memory_error(struct mce *m)
{
	switch (m->cpuvendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_HYGON:
		return amd_mce_is_memory_error(m);

	case X86_VENDOR_INTEL:
	case X86_VENDOR_ZHAOXIN:
		/*
		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
		 *
		 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
		 * indicating a memory error. Bit 8 is used for indicating a
		 * cache hierarchy error. The combination of bit 2 and bit 3
		 * is used for indicating a `generic' cache hierarchy error.
		 * But we can't just blindly check the above bits, because if
		 * bit 11 is set, then it is a bus/interconnect error - and
		 * either way the above bits just give more detail on what
		 * bus/interconnect error happened. Note that bit 12 can be
		 * ignored, as it's the "filter" bit.
		 */
		return (m->status & 0xef80) == BIT(7) ||
		       (m->status & 0xef00) == BIT(8) ||
		       (m->status & 0xeffc) == 0xc;

	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(mce_is_memory_error);
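
/*
 * Worked example for the MCACOD checks above (illustrative status value,
 * not taken from real hardware): a memory controller data-read error on
 * channel 2 encodes as "0000 0000 1MMM CCCC" with MMM=001 and CCCC=0010,
 * i.e. MCACOD = 0x0092, and (0x0092 & 0xef80) == 0x0080 == BIT(7), so it
 * is classified as a memory error. A cache hierarchy code such as 0x0134
 * ("0000 0001 RRRR TTLL") fails that test but passes
 * (0x0134 & 0xef00) == BIT(8). Bit 12 is masked out of every check
 * because it is the "filter" bit.
 */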

static bool whole_page(struct mce *m)
{
	if (!mca_cfg.ser || !(m->status & MCI_STATUS_MISCV))
		return true;

	return MCI_MISC_ADDR_LSB(m->misc) >= PAGE_SHIFT;
}
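
/*
 * Example for the LSB check above, with a hypothetical MISC value: an
 * MCI_MISC_ADDR_LSB() of 12 means the lowest valid bit of the recoverable
 * address is bit 12, i.e. the address is only accurate to a 4K page, so
 * with PAGE_SHIFT == 12 the whole page must be treated as poisoned. An
 * LSB of 6 would instead pinpoint a single 64-byte cache line and
 * whole_page() would return false.
 */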

bool mce_is_correctable(struct mce *m)
{
	if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
		return false;

	if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED)
		return false;

	if (m->status & MCI_STATUS_UC)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(mce_is_correctable);

static int mce_early_notifier(struct notifier_block *nb, unsigned long val,
			      void *data)
{
	struct mce *m = (struct mce *)data;

	if (!m)
		return NOTIFY_DONE;

	/* Emit the trace record: */
	trace_mce_record(m);

	set_bit(0, &mce_need_notify);

	mce_notify_irq();

	return NOTIFY_DONE;
}

static struct notifier_block early_nb = {
	.notifier_call	= mce_early_notifier,
	.priority	= MCE_PRIO_EARLY,
};

static int uc_decode_notifier(struct notifier_block *nb, unsigned long val,
			      void *data)
{
	struct mce *mce = (struct mce *)data;
	unsigned long pfn;

	if (!mce || !mce_usable_address(mce))
		return NOTIFY_DONE;

	if (mce->severity != MCE_AO_SEVERITY &&
	    mce->severity != MCE_DEFERRED_SEVERITY)
		return NOTIFY_DONE;

	pfn = (mce->addr & MCI_ADDR_PHYSADDR) >> PAGE_SHIFT;
	if (!memory_failure(pfn, 0)) {
		set_mce_nospec(pfn);
		mce->kflags |= MCE_HANDLED_UC;
	}

	return NOTIFY_OK;
}
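
/*
 * PFN arithmetic used above, with a made-up address: for
 * mce->addr == 0x123456789, masking with MCI_ADDR_PHYSADDR and shifting
 * by PAGE_SHIFT (12) yields pfn 0x123456. memory_failure() then offlines
 * that 4K page and set_mce_nospec() marks it inaccessible in the
 * kernel's direct map so the poison is not touched again.
 */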

static struct notifier_block mce_uc_nb = {
	.notifier_call	= uc_decode_notifier,
	.priority	= MCE_PRIO_UC,
};

static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct mce *m = (struct mce *)data;

	if (!m)
		return NOTIFY_DONE;

	if (mca_cfg.print_all || !m->kflags)
		__print_mce(m);

	return NOTIFY_DONE;
}

static struct notifier_block mce_default_nb = {
	.notifier_call	= mce_default_notifier,
	/* lowest prio, we want it to run last. */
	.priority	= MCE_PRIO_LOWEST,
};

/*
 * Read ADDR and MISC registers.
 */
static noinstr void mce_read_aux(struct mce *m, int i)
{
	if (m->status & MCI_STATUS_MISCV)
		m->misc = mce_rdmsrl(mca_msr_reg(i, MCA_MISC));

	if (m->status & MCI_STATUS_ADDRV) {
		m->addr = mce_rdmsrl(mca_msr_reg(i, MCA_ADDR));

		/*
		 * Mask the reported address by the reported granularity.
		 */
		if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
			u8 shift = MCI_MISC_ADDR_LSB(m->misc);
			m->addr >>= shift;
			m->addr <<= shift;
		}

		smca_extract_err_addr(m);
	}

	if (mce_flags.smca) {
		m->ipid = mce_rdmsrl(MSR_AMD64_SMCA_MCx_IPID(i));

		if (m->status & MCI_STATUS_SYNDV)
			m->synd = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND(i));
	}
}
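
/*
 * Sketch of the masking done in mce_read_aux() above, using a made-up
 * address: with m->addr == 0x12345678 and a reported LSB of 6 (cache
 * line granularity), the shift pair clears the low 6 bits:
 *
 *	0x12345678 >> 6 << 6 == 0x12345640
 *
 * so consumers never see address bits the hardware did not vouch for.
 */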

DEFINE_PER_CPU(unsigned, mce_poll_count);

/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: the spec recommends panicking for fatal unsignalled
 * errors here. However this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between exception handler
 * and poll handler -- so we skip this for now.
 * These cases should not happen anyway, or only when the CPU
 * is already totally confused. In this case it's likely it will
 * not fully execute the machine check handler either.
 */
bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
	bool error_seen = false;
	struct mce m;
	int i;

	this_cpu_inc(mce_poll_count);

	mce_gather_info(&m, NULL);

	if (flags & MCP_TIMESTAMP)
		m.tsc = rdtsc();

	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
		if (!mce_banks[i].ctl || !test_bit(i, *b))
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		barrier();
		m.status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));

		/*
		 * Update storm tracking here, before checking for the
		 * MCI_STATUS_VAL bit. Valid corrected errors count
		 * towards declaring, or maintaining, storm status. No
		 * error in a bank counts towards avoiding, or ending,
		 * storm status.
		 */
		if (!mca_cfg.cmci_disabled)
			mce_track_storm(&m);

		/* If this entry is not valid, ignore it */
		if (!(m.status & MCI_STATUS_VAL))
			continue;

		/*
		 * If we are logging everything (at CPU online) or this
		 * is a corrected error, then we must log it.
		 */
		if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC))
			goto log_it;

		/*
		 * Newer Intel systems that support software error
		 * recovery need to make additional checks. Other
		 * CPUs should skip over uncorrected errors, but log
		 * everything else.
		 */
		if (!mca_cfg.ser) {
			if (m.status & MCI_STATUS_UC)
				continue;
			goto log_it;
		}

		/* Log "not enabled" (speculative) errors */
		if (!(m.status & MCI_STATUS_EN))
			goto log_it;

		/*
		 * Log UCNA (SDM: 15.6.3 "UCR Error Classification")
		 * UC == 1 && PCC == 0 && S == 0
		 */
		if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S))
			goto log_it;

		/*
		 * Skip anything else. Presumption is that our read of this
		 * bank is racing with a machine check. Leave the log alone
		 * for do_machine_check() to deal with it.
		 */
		continue;

log_it:
		error_seen = true;

		if (flags & MCP_DONTLOG)
			goto clear_it;

		mce_read_aux(&m, i);
		m.severity = mce_severity(&m, NULL, NULL, false);
		/*
		 * Don't get the IP here because it's unlikely to
		 * have anything to do with the actual error location.
		 */

		if (mca_cfg.dont_log_ce && !mce_usable_address(&m))
			goto clear_it;

		if (flags & MCP_QUEUE_LOG)
			mce_gen_pool_add(&m);
		else
			mce_log(&m);

clear_it:
		/*
		 * Clear state for this bank.
		 */
		mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0);
	}

	/*
	 * Don't clear MCG_STATUS here because it's only defined for
	 * exceptions.
	 */

	sync_core();

	return error_seen;
}
EXPORT_SYMBOL_GPL(machine_check_poll);

/*
 * During IFU recovery Sandy Bridge-EP 4S processors set the RIPV and
 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
 * Vol 3B Table 15-20). But this confuses both the code that determines
 * whether the machine check occurred in kernel or user mode, and also
 * the severity assessment code. Pretend that EIPV was set, and take the
 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
 */
static __always_inline void
quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
{
	if (bank != 0)
		return;
	if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
		return;
	if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
		          MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
			  MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
			  MCACOD)) !=
			 (MCI_STATUS_UC|MCI_STATUS_EN|
			  MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
			  MCI_STATUS_AR|MCACOD_INSTR))
		return;

	m->mcgstatus |= MCG_STATUS_EIPV;
	m->ip = regs->ip;
	m->cs = regs->cs;
}
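
/*
 * The quirk above is a masked-equality test: among the status bits listed
 * in the mask, only UC|EN|MISCV|ADDRV|S|AR may be set and the MCACOD
 * field must decode exactly to MCACOD_INSTR (0x0150, instruction fetch).
 * For illustration, a status word that additionally has OVER or PCC set
 * does not match and the quirk is not applied.
 */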

/*
 * Disable fast string copy and return from the MCE handler upon the first SRAR
 * MCE on bank 1 due to a CPU erratum on Intel Skylake/Cascade Lake/Cooper Lake
 * CPUs.
 * The fast string copy instructions ("REP; MOVS*") could consume an
 * uncorrectable memory error in the cache line _right after_ the desired region
 * to copy and raise an MCE with RIP pointing to the instruction _after_ the
 * "REP; MOVS*".
 * This mitigation addresses the issue completely with the caveat of performance
 * degradation on the CPU affected. This is still better than the OS crashing on
 * MCEs raised on an irrelevant process due to "REP; MOVS*" accesses from a
 * kernel context (e.g., copy_page).
 *
 * Returns true when fast string copy on CPU has been disabled.
 */
static noinstr bool quirk_skylake_repmov(void)
{
	u64 mcgstatus   = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	u64 misc_enable = mce_rdmsrl(MSR_IA32_MISC_ENABLE);
	u64 mc1_status;

	/*
	 * Apply the quirk only to local machine checks, i.e., no broadcast
	 * sync is needed.
	 */
	if (!(mcgstatus & MCG_STATUS_LMCES) ||
	    !(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING))
		return false;

	mc1_status = mce_rdmsrl(MSR_IA32_MCx_STATUS(1));

	/* Check for a software-recoverable data fetch error. */
	if ((mc1_status &
	     (MCI_STATUS_VAL | MCI_STATUS_OVER | MCI_STATUS_UC | MCI_STATUS_EN |
	      MCI_STATUS_ADDRV | MCI_STATUS_MISCV | MCI_STATUS_PCC |
	      MCI_STATUS_AR | MCI_STATUS_S)) ==
	     (MCI_STATUS_VAL |                   MCI_STATUS_UC | MCI_STATUS_EN |
	      MCI_STATUS_ADDRV | MCI_STATUS_MISCV |
	      MCI_STATUS_AR | MCI_STATUS_S)) {
		misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
		mce_wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		mce_wrmsrl(MSR_IA32_MCx_STATUS(1), 0);

		instrumentation_begin();
		pr_err_once("Erratum detected, disable fast string copy instructions.\n");
		instrumentation_end();

		return true;
	}

	return false;
}

/*
 * Some Zen-based Instruction Fetch Units set EIPV=RIPV=0 on poison consumption
 * errors. This means mce_gather_info() will not save the "ip" and "cs" registers.
 *
 * However, the context is still valid, so save the "cs" register for later use.
 *
 * The "ip" register is truly unknown, so don't save it or fixup EIPV/RIPV.
 *
 * The Instruction Fetch Unit is at MCA bank 1 for all affected systems.
 */
static __always_inline void quirk_zen_ifu(int bank, struct mce *m, struct pt_regs *regs)
{
	if (bank != 1)
		return;
	if (!(m->status & MCI_STATUS_POISON))
		return;

	m->cs = regs->cs;
}

/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 */
static __always_inline int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
					  struct pt_regs *regs)
{
	char *tmp = *msg;
	int i;

	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
		m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));
		if (!(m->status & MCI_STATUS_VAL))
			continue;

		arch___set_bit(i, validp);
		if (mce_flags.snb_ifu_quirk)
			quirk_sandybridge_ifu(i, m, regs);

		if (mce_flags.zen_ifu_quirk)
			quirk_zen_ifu(i, m, regs);

		m->bank = i;
		if (mce_severity(m, regs, &tmp, true) >= MCE_PANIC_SEVERITY) {
			mce_read_aux(m, i);
			*msg = tmp;
			return 1;
		}
	}
	return 0;
}

/*
 * Variable to establish order between CPUs while scanning.
 * Each CPU spins initially until mce_executing equals its number.
 */
static atomic_t mce_executing;

/*
 * Defines order of CPUs on entry. First CPU becomes Monarch.
 */
static atomic_t mce_callin;

/*
 * Track which CPUs entered the MCA broadcast synchronization and which did
 * not, in order to print holdouts.
 */
static cpumask_t mce_missing_cpus = CPU_MASK_ALL;

/*
 * Check if a timeout waiting for other CPUs happened.
 */
static noinstr int mce_timed_out(u64 *t, const char *msg)
{
	int ret = 0;

	/* Enable instrumentation around calls to external facilities */
	instrumentation_begin();

	/*
	 * The others already did panic for some reason.
	 * Bail out like in a timeout.
	 * rmb() to tell the compiler that system_state
	 * might have been modified by someone else.
	 */
	rmb();
	if (atomic_read(&mce_panicked))
		wait_for_panic();
	if (!mca_cfg.monarch_timeout)
		goto out;
	if ((s64)*t < SPINUNIT) {
		if (cpumask_and(&mce_missing_cpus, cpu_online_mask, &mce_missing_cpus))
			pr_emerg("CPUs not responding to MCE broadcast (may include false positives): %*pbl\n",
				 cpumask_pr_args(&mce_missing_cpus));
		mce_panic(msg, NULL, NULL);

		ret = 1;
		goto out;
	}
	*t -= SPINUNIT;

out:
	touch_nmi_watchdog();

	instrumentation_end();

	return ret;
}
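
/*
 * Timeout bookkeeping example, using the default Intel setting applied in
 * __mcheck_cpu_apply_quirks(): monarch_timeout = USEC_PER_SEC gives
 * mce_start()/mce_end() a budget of 10^6 * NSEC_PER_USEC = 10^9 ns. Each
 * polling pass does ndelay(SPINUNIT) and subtracts SPINUNIT (100 ns)
 * here, so a CPU spins up to 10^7 times, i.e. roughly one second, before
 * the rendezvous is declared failed.
 */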

/*
 * The Monarch's reign.  The Monarch is the CPU that entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. When any
 * error is fatal, panic. Only then let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in an unrecoverable
 * case and also make sure that all CPUs' errors are always examined.
 *
 * Also this detects the case of a machine check event coming from outer
 * space (not detected by any CPU). In this case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in an unrecoverable place, but in this case the system is in a semi-stable
 * state and won't corrupt anything by itself. It's ok to let the others
 * continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
static void mce_reign(void)
{
	int cpu;
	struct mce *m = NULL;
	int global_worst = 0;
	char *msg = NULL;

	/*
	 * This CPU is the Monarch and the other CPUs have run
	 * through their handlers.
	 * Grade the severity of the errors of all the CPUs.
	 */
	for_each_possible_cpu(cpu) {
		struct mce *mtmp = &per_cpu(mces_seen, cpu);

		if (mtmp->severity > global_worst) {
			global_worst = mtmp->severity;
			m = &per_cpu(mces_seen, cpu);
		}
	}

	/*
	 * Cannot recover? Panic here then.
	 * This dumps all the mces in the log buffer and stops the
	 * other CPUs.
	 */
	if (m && global_worst >= MCE_PANIC_SEVERITY) {
		/* call mce_severity() to get "msg" for panic */
		mce_severity(m, NULL, &msg, true);
		mce_panic("Fatal machine check", m, msg);
	}

	/*
	 * For UC somewhere we let the CPU that detects it handle it.
	 * The others must also be allowed to continue, otherwise the
	 * handling CPU could deadlock on a lock.
	 */

	/*
	 * No machine check event found. Must be some external
	 * source or one CPU is hung. Panic.
	 */
	if (global_worst <= MCE_KEEP_SEVERITY)
		mce_panic("Fatal machine check from unknown source", NULL, NULL);

	/*
	 * Now clear all the mces_seen so that they don't reappear on
	 * the next mce.
	 */
	for_each_possible_cpu(cpu)
		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}

static atomic_t global_nwo;

/*
 * Start of Monarch synchronization. This waits until all CPUs have
 * entered the exception handler and then determines if any of them
 * saw a fatal event that requires panic. Then it executes them
 * in the entry order.
 * TBD double check parallel CPU hotunplug
 */
static noinstr int mce_start(int *no_way_out)
{
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
	int order, ret = -1;

	if (!timeout)
		return ret;

	raw_atomic_add(*no_way_out, &global_nwo);
	/*
	 * Rely on the implied barrier below, such that global_nwo
	 * is updated before mce_callin.
	 */
	order = raw_atomic_inc_return(&mce_callin);
	arch_cpumask_clear_cpu(smp_processor_id(), &mce_missing_cpus);

	/* Enable instrumentation around calls to external facilities */
	instrumentation_begin();

	/*
	 * Wait for everyone.
	 */
	while (raw_atomic_read(&mce_callin) != num_online_cpus()) {
		if (mce_timed_out(&timeout,
				  "Timeout: Not all CPUs entered broadcast exception handler")) {
			raw_atomic_set(&global_nwo, 0);
			goto out;
		}
		ndelay(SPINUNIT);
	}

	/*
	 * mce_callin should be read before global_nwo
	 */
	smp_rmb();

	if (order == 1) {
		/*
		 * Monarch: Starts executing now, the others wait.
		 */
		raw_atomic_set(&mce_executing, 1);
	} else {
		/*
		 * Subject: Now start the scanning loop one by one in
		 * the original callin order.
		 * This way any shared banks will be seen by only one CPU
		 * before being cleared, avoiding duplicates.
		 */
		while (raw_atomic_read(&mce_executing) < order) {
			if (mce_timed_out(&timeout,
					  "Timeout: Subject CPUs unable to finish machine check processing")) {
				raw_atomic_set(&global_nwo, 0);
				goto out;
			}
			ndelay(SPINUNIT);
		}
	}

	/*
	 * Cache the global no_way_out state.
	 */
	*no_way_out = raw_atomic_read(&global_nwo);

	ret = order;

out:
	instrumentation_end();

	return ret;
}

/*
 * Synchronize between CPUs after main scanning loop.
 * This invokes the bulk of the Monarch processing.
 */
static noinstr int mce_end(int order)
{
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
	int ret = -1;

	/* Allow instrumentation around external facilities. */
	instrumentation_begin();

	if (!timeout)
		goto reset;
	if (order < 0)
		goto reset;

	/*
	 * Allow others to run.
	 */
	atomic_inc(&mce_executing);

	if (order == 1) {
		/*
		 * Monarch: Wait for everyone to go through their scanning
		 * loops.
		 */
		while (atomic_read(&mce_executing) <= num_online_cpus()) {
			if (mce_timed_out(&timeout,
					  "Timeout: Monarch CPU unable to finish machine check processing"))
				goto reset;
			ndelay(SPINUNIT);
		}

		mce_reign();
		barrier();
		ret = 0;
	} else {
		/*
		 * Subject: Wait for Monarch to finish.
		 */
		while (atomic_read(&mce_executing) != 0) {
			if (mce_timed_out(&timeout,
					  "Timeout: Monarch CPU did not finish machine check processing"))
				goto reset;
			ndelay(SPINUNIT);
		}

		/*
		 * Don't reset anything. That's done by the Monarch.
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Reset all global state.
	 */
reset:
	atomic_set(&global_nwo, 0);
	atomic_set(&mce_callin, 0);
	cpumask_setall(&mce_missing_cpus);
	barrier();

	/*
	 * Let others run again.
	 */
	atomic_set(&mce_executing, 0);

out:
	instrumentation_end();

	return ret;
}

static __always_inline void mce_clear_state(unsigned long *toclear)
{
	int i;

	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
		if (arch_test_bit(i, toclear))
			mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0);
	}
}

/*
 * Cases where we avoid rendezvous handler timeout:
 * 1) If this CPU is offline.
 *
 * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
 *  skip those CPUs which remain looping in the 1st kernel - see
 *  crash_nmi_callback().
 *
 * Note: there still is a small window between kexec-ing and the new,
 * kdump kernel establishing a new #MC handler where a broadcasted MCE
 * might not get handled properly.
 */
static noinstr bool mce_check_crashing_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	if (arch_cpu_is_offline(cpu) ||
	    (crashing_cpu != -1 && crashing_cpu != cpu)) {
		u64 mcgstatus;

		mcgstatus = __rdmsr(MSR_IA32_MCG_STATUS);

		if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) {
			if (mcgstatus & MCG_STATUS_LMCES)
				return false;
		}

		if (mcgstatus & MCG_STATUS_RIPV) {
			__wrmsr(MSR_IA32_MCG_STATUS, 0, 0);
			return true;
		}
	}
	return false;
}

static __always_inline int
__mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *final,
		unsigned long *toclear, unsigned long *valid_banks, int no_way_out,
		int *worst)
{
	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
	struct mca_config *cfg = &mca_cfg;
	int severity, i, taint = 0;

	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
		arch___clear_bit(i, toclear);
		if (!arch_test_bit(i, valid_banks))
			continue;

		if (!mce_banks[i].ctl)
			continue;

		m->misc = 0;
		m->addr = 0;
		m->bank = i;

		m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));
		if (!(m->status & MCI_STATUS_VAL))
			continue;

		/*
		 * Corrected or non-signaled errors are handled by
		 * machine_check_poll(). Leave them alone, unless this panics.
		 */
		if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
			!no_way_out)
			continue;

		/* Set taint even when machine check was not enabled. */
		taint++;

		severity = mce_severity(m, regs, NULL, true);

		/*
		 * When the machine check was for a corrected/deferred error,
		 * don't touch it unless we're panicking.
		 */
		if ((severity == MCE_KEEP_SEVERITY ||
		     severity == MCE_UCNA_SEVERITY) && !no_way_out)
			continue;

		arch___set_bit(i, toclear);

		/* Machine check event was not enabled. Clear, but ignore. */
		if (severity == MCE_NO_SEVERITY)
			continue;

		mce_read_aux(m, i);

		/* assuming valid severity level != 0 */
		m->severity = severity;

		/*
		 * Enable instrumentation around the mce_log() call which is
		 * done in #MC context, where instrumentation is disabled.
		 */
		instrumentation_begin();
		mce_log(m);
		instrumentation_end();

		if (severity > *worst) {
			*final = *m;
			*worst = severity;
		}
	}

	/* mce_clear_state() will clear *final, save locally for use later */
	*m = *final;

	return taint;
}

static void kill_me_now(struct callback_head *ch)
{
	struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);

	p->mce_count = 0;
	force_sig(SIGBUS);
}

static void kill_me_maybe(struct callback_head *cb)
{
	struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
	int flags = MF_ACTION_REQUIRED;
	unsigned long pfn;
	int ret;

	p->mce_count = 0;
	pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);

	if (!p->mce_ripv)
		flags |= MF_MUST_KILL;

	pfn = (p->mce_addr & MCI_ADDR_PHYSADDR) >> PAGE_SHIFT;
	ret = memory_failure(pfn, flags);
	if (!ret) {
		set_mce_nospec(pfn);
		sync_core();
		return;
	}

	/*
	 * -EHWPOISON from memory_failure() means that it already sent SIGBUS
	 * to the current process with the proper error info.
	 * -EOPNOTSUPP means hwpoison_filter() filtered the error event.
	 *
	 * In both cases, no further processing is required.
	 */
	if (ret == -EHWPOISON || ret == -EOPNOTSUPP)
		return;

	pr_err("Memory error not recovered");
	kill_me_now(cb);
}

static void kill_me_never(struct callback_head *cb)
{
	struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
	unsigned long pfn;

	p->mce_count = 0;
	pr_err("Kernel accessed poison in user space at %llx\n", p->mce_addr);
	pfn = (p->mce_addr & MCI_ADDR_PHYSADDR) >> PAGE_SHIFT;
	if (!memory_failure(pfn, 0))
		set_mce_nospec(pfn);
}

static void queue_task_work(struct mce *m, char *msg, void (*func)(struct callback_head *))
{
	int count = ++current->mce_count;

	/* First call, save all the details */
	if (count == 1) {
		current->mce_addr = m->addr;
		current->mce_kflags = m->kflags;
		current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
		current->mce_whole_page = whole_page(m);
		current->mce_kill_me.func = func;
	}

	/* Ten is likely overkill. Don't expect more than two faults before task_work() */
	if (count > 10)
		mce_panic("Too many consecutive machine checks while accessing user data", m, msg);

	/* Second or later call, make sure page address matches the one from first call */
	if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
		mce_panic("Consecutive machine checks to different user pages", m, msg);

	/* Do not call task_work_add() more than once */
	if (count > 1)
		return;

	task_work_add(current, &current->mce_kill_me, TWA_RESUME);
}
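
/*
 * Example flow for the count logic above: a single copy_from_user() that
 * hits poison can fault a handful of times on the same page before the
 * task returns to user space. Only call 1 queues mce_kill_me; calls 2..10
 * merely verify that the fault is still on the page saved by call 1.
 */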

/* Handle unconfigured int18 (should never happen) */
static noinstr void unexpected_machine_check(struct pt_regs *regs)
{
	instrumentation_begin();
	pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
	       smp_processor_id());
	instrumentation_end();
}

/*
 * The actual machine check handler. This only handles real exceptions when
 * something got corrupted coming in through int 18.
 *
 * This is executed in #MC context not subject to normal locking rules.
 * This implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 *
 * On Intel systems this is entered on all CPUs in parallel through
 * MCE broadcast. However some CPUs might be broken beyond repair,
 * so be always careful when synchronizing with others.
 *
 * Tracing and kprobes are disabled: if we interrupted a kernel context
 * with IF=1, we need to minimize stack usage.  There are also recursion
 * issues: if the machine check was due to a failure of the memory
 * backing the user stack, tracing that reads the user stack will cause
 * potentially infinite recursion.
 *
 * Currently, the #MC handler calls out to a number of external facilities
 * and, therefore, allows instrumentation around them. The optimal thing to
 * have would be to do the absolutely minimal work required in #MC context
 * and have instrumentation disabled only around that. Further processing can
 * then happen in process context where instrumentation is allowed. Achieving
 * that requires careful auditing and modifications. Until then, the code
 * allows instrumentation temporarily, where required.
 */
noinstr void do_machine_check(struct pt_regs *regs)
{
	int worst = 0, order, no_way_out, kill_current_task, lmce, taint = 0;
	DECLARE_BITMAP(valid_banks, MAX_NR_BANKS) = { 0 };
	DECLARE_BITMAP(toclear, MAX_NR_BANKS) = { 0 };
	struct mce m, *final;
	char *msg = NULL;

	if (unlikely(mce_flags.p5))
		return pentium_machine_check(regs);
	else if (unlikely(mce_flags.winchip))
		return winchip_machine_check(regs);
	else if (unlikely(!mca_cfg.initialized))
		return unexpected_machine_check(regs);

	if (mce_flags.skx_repmov_quirk && quirk_skylake_repmov())
		goto clear;

	/*
	 * Establish sequential order between the CPUs entering the machine
	 * check handler.
	 */
	order = -1;

	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE.
	 */
	no_way_out = 0;

	/*
	 * If kill_current_task is not set, there might be a way to recover from this
	 * error.
	 */
	kill_current_task = 0;

	/*
	 * MCEs are always local on AMD. The same is determined by
	 * MCG_STATUS_LMCES on Intel.
	 */
	lmce = 1;

	this_cpu_inc(mce_exception_count);

	mce_gather_info(&m, regs);
	m.tsc = rdtsc();

	final = this_cpu_ptr(&mces_seen);
	*final = m;

	no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);

	barrier();

	/*
	 * When there is no restart IP we might need to kill or panic.
	 * Assume the worst for now, but if we find the
	 * severity is MCE_AR_SEVERITY we have other options.
	 */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		kill_current_task = 1;
	/*
	 * Check if this MCE is signaled to only this logical processor
	 * (Intel and Zhaoxin only).
	 */
	if (m.cpuvendor == X86_VENDOR_INTEL ||
	    m.cpuvendor == X86_VENDOR_ZHAOXIN)
		lmce = m.mcgstatus & MCG_STATUS_LMCES;

	/*
	 * A local machine check may already know that we have to panic.
	 * A broadcast machine check begins the rendezvous in mce_start().
	 * Go through all banks in exclusion of the other CPUs. This way we
	 * don't report duplicated events on shared banks because the first one
	 * to see it will clear it.
	 */
	if (lmce) {
		if (no_way_out)
			mce_panic("Fatal local machine check", &m, msg);
	} else {
		order = mce_start(&no_way_out);
	}

	taint = __mc_scan_banks(&m, regs, final, toclear, valid_banks, no_way_out, &worst);

	if (!no_way_out)
		mce_clear_state(toclear);

	/*
	 * Do most of the synchronization with other CPUs.
	 * When there's any problem use only local no_way_out state.
	 */
	if (!lmce) {
		if (mce_end(order) < 0) {
			if (!no_way_out)
				no_way_out = worst >= MCE_PANIC_SEVERITY;

			if (no_way_out)
				mce_panic("Fatal machine check on current CPU", &m, msg);
		}
	} else {
		/*
		 * If there was a fatal machine check we should have
		 * already called mce_panic earlier in this function.
		 * Since we re-read the banks, we might have found
		 * something new. Check again to see if we found a
		 * fatal error. We call "mce_severity()" again to
		 * make sure we have the right "msg".
		 */
		if (worst >= MCE_PANIC_SEVERITY) {
			mce_severity(&m, regs, &msg, true);
			mce_panic("Local fatal machine check!", &m, msg);
		}
	}

	/*
	 * Enable instrumentation around the external facilities like task_work_add()
	 * (via queue_task_work()), fixup_exception() etc. For now, that is. Fixing this
	 * properly would need a lot more involved reorganization.
	 */
	instrumentation_begin();

	if (taint)
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	if (worst != MCE_AR_SEVERITY && !kill_current_task)
		goto out;

	/* Fault was in user mode and we need to take some action */
	if ((m.cs & 3) == 3) {
		/* If this triggers there is no way to recover. Die hard. */
		BUG_ON(!on_thread_stack() || !user_mode(regs));

		if (!mce_usable_address(&m))
			queue_task_work(&m, msg, kill_me_now);
		else
			queue_task_work(&m, msg, kill_me_maybe);

	} else if (m.mcgstatus & MCG_STATUS_SEAM_NR) {
		/*
		 * Saved RIP on stack makes it look like the machine check
		 * was taken in the kernel on the instruction following
		 * the entry to SEAM mode. But MCG_STATUS_SEAM_NR indicates
		 * that the machine check was taken inside SEAM non-root
		 * mode.  CPU core has already marked that guest as dead.
		 * It is OK for the kernel to resume execution at the
		 * apparent point of the machine check as the fault did
		 * not occur there. Mark the page as poisoned so it won't
		 * be added to free list when the guest is terminated.
		 */
		if (mce_usable_address(&m)) {
			struct page *p = pfn_to_online_page(m.addr >> PAGE_SHIFT);

			if (p)
				SetPageHWPoison(p);
		}
	} else {
		/*
		 * Handle an MCE which has happened in kernel space but from
		 * which the kernel can recover: ex_has_fault_handler() has
		 * already verified that the rIP at which the error happened is
		 * a rIP from which the kernel can recover (by jumping to
		 * recovery code specified in _ASM_EXTABLE_FAULT()) and the
		 * corresponding exception handler which would do that is the
		 * proper one.
		 */
		if (m.kflags & MCE_IN_KERNEL_RECOV) {
			if (!fixup_exception(regs, X86_TRAP_MC, 0, 0))
				mce_panic("Failed kernel mode recovery", &m, msg);
		}

		if (m.kflags & MCE_IN_KERNEL_COPYIN)
			queue_task_work(&m, msg, kill_me_never);
	}

out:
	instrumentation_end();

clear:
	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
}
EXPORT_SYMBOL_GPL(do_machine_check);

#ifndef CONFIG_MEMORY_FAILURE
int memory_failure(unsigned long pfn, int flags)
{
	/* mce_severity() should not hand us an ACTION_REQUIRED error */
	BUG_ON(flags & MF_ACTION_REQUIRED);
	pr_err("Uncorrected memory error in page 0x%lx ignored\n"
	       "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
	       pfn);

	return 0;
}
#endif

/*
 * Periodic polling timer for "silent" machine check errors.  If the
 * poller finds an MCE, poll 2x faster.  When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
static unsigned long check_interval = INITIAL_CHECK_INTERVAL;

static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static void __start_timer(struct timer_list *t, unsigned long interval)
{
	unsigned long when = jiffies + interval;
	unsigned long flags;

	local_irq_save(flags);

	if (!timer_pending(t) || time_before(when, t->expires))
		mod_timer(t, round_jiffies(when));

	local_irq_restore(flags);
}

static void mc_poll_banks_default(void)
{
	machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
}

void (*mc_poll_banks)(void) = mc_poll_banks_default;

static void mce_timer_fn(struct timer_list *t)
{
	struct timer_list *cpu_t = this_cpu_ptr(&mce_timer);
	unsigned long iv;

	WARN_ON(cpu_t != t);

	iv = __this_cpu_read(mce_next_interval);

	if (mce_available(this_cpu_ptr(&cpu_info)))
		mc_poll_banks();

	/*
	 * Alert userspace if needed. If we logged an MCE, reduce the polling
	 * interval, otherwise increase the polling interval.
	 */
	if (mce_notify_irq())
		iv = max(iv / 2, (unsigned long) HZ/100);
	else
		iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));

	if (mce_get_storm_mode()) {
		__start_timer(t, HZ);
	} else {
		__this_cpu_write(mce_next_interval, iv);
		__start_timer(t, iv);
	}
}
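
/*
 * Interval adaptation example (INITIAL_CHECK_INTERVAL, 5 minutes by
 * default): after an event is logged the poll interval halves, e.g. the
 * jiffies equivalent of 300s -> 150s -> 75s ..., but never drops below
 * HZ/100 jiffies (10ms). Quiet polls double it again, capped at
 * check_interval seconds.
 */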
1712 
1713 /*
1714  * When a storm starts on any bank on this CPU, switch to polling
1715  * once per second. When the storm ends, revert to the default
1716  * polling interval.
1717  */
1718 void mce_timer_kick(bool storm)
1719 {
1720 	struct timer_list *t = this_cpu_ptr(&mce_timer);
1721 
1722 	mce_set_storm_mode(storm);
1723 
1724 	if (storm)
1725 		__start_timer(t, HZ);
1726 	else
1727 		__this_cpu_write(mce_next_interval, check_interval * HZ);
1728 }
1729 
1730 /* Must not be called in IRQ context where del_timer_sync() can deadlock */
1731 static void mce_timer_delete_all(void)
1732 {
1733 	int cpu;
1734 
1735 	for_each_online_cpu(cpu)
1736 		del_timer_sync(&per_cpu(mce_timer, cpu));
1737 }
1738 
1739 /*
1740  * Notify the user(s) about new machine check events.
1741  * Can be called from interrupt context, but not from machine check/NMI
1742  * context.
1743  */
1744 int mce_notify_irq(void)
1745 {
1746 	/* Not more than two messages every minute */
1747 	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
1748 
1749 	if (test_and_clear_bit(0, &mce_need_notify)) {
1750 		mce_work_trigger();
1751 
1752 		if (__ratelimit(&ratelimit))
1753 			pr_info(HW_ERR "Machine check events logged\n");
1754 
1755 		return 1;
1756 	}
1757 	return 0;
1758 }
1759 EXPORT_SYMBOL_GPL(mce_notify_irq);
1760 
1761 static void __mcheck_cpu_mce_banks_init(void)
1762 {
1763 	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1764 	u8 n_banks = this_cpu_read(mce_num_banks);
1765 	int i;
1766 
1767 	for (i = 0; i < n_banks; i++) {
1768 		struct mce_bank *b = &mce_banks[i];
1769 
1770 		/*
1771 		 * Init them all, __mcheck_cpu_apply_quirks() is going to apply
1772 		 * the required vendor quirks before
1773 		 * __mcheck_cpu_init_clear_banks() does the final bank setup.
1774 		 */
1775 		b->ctl = -1ULL;
1776 		b->init = true;
1777 	}
1778 }
1779 
1780 /*
1781  * Initialize Machine Checks for a CPU.
1782  */
1783 static void __mcheck_cpu_cap_init(void)
1784 {
1785 	u64 cap;
1786 	u8 b;
1787 
1788 	rdmsrl(MSR_IA32_MCG_CAP, cap);
1789 
1790 	b = cap & MCG_BANKCNT_MASK;
1791 
1792 	if (b > MAX_NR_BANKS) {
1793 		pr_warn("CPU%d: Using only %u machine check banks out of %u\n",
1794 			smp_processor_id(), MAX_NR_BANKS, b);
1795 		b = MAX_NR_BANKS;
1796 	}
1797 
1798 	this_cpu_write(mce_num_banks, b);
1799 
1800 	__mcheck_cpu_mce_banks_init();
1801 
1802 	/* Use accurate RIP reporting if available. */
1803 	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
1804 		mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
1805 
1806 	if (cap & MCG_SER_P)
1807 		mca_cfg.ser = 1;
1808 }
1809 
1810 static void __mcheck_cpu_init_generic(void)
1811 {
1812 	enum mcp_flags m_fl = 0;
1813 	mce_banks_t all_banks;
1814 	u64 cap;
1815 
1816 	if (!mca_cfg.bootlog)
1817 		m_fl = MCP_DONTLOG;
1818 
1819 	/*
1820 	 * Log the machine checks left over from the previous reset. Log them
1821 	 * only, do not start processing them. That will happen in mcheck_late_init()
1822 	 * when all consumers have been registered on the notifier chain.
1823 	 */
1824 	bitmap_fill(all_banks, MAX_NR_BANKS);
1825 	machine_check_poll(MCP_UC | MCP_QUEUE_LOG | m_fl, &all_banks);
1826 
1827 	cr4_set_bits(X86_CR4_MCE);
1828 
1829 	rdmsrl(MSR_IA32_MCG_CAP, cap);
1830 	if (cap & MCG_CTL_P)
1831 		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
1832 }
1833 
1834 static void __mcheck_cpu_init_clear_banks(void)
1835 {
1836 	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1837 	int i;
1838 
1839 	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1840 		struct mce_bank *b = &mce_banks[i];
1841 
1842 		if (!b->init)
1843 			continue;
1844 		wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl);
1845 		wrmsrl(mca_msr_reg(i, MCA_STATUS), 0);
1846 	}
1847 }
1848 
1849 /*
1850  * Do a final check to see if there are any unused/RAZ banks.
1851  *
1852  * This must be done after the banks have been initialized and any quirks have
1853  * been applied.
1854  *
1855  * Do not call this from any user-initiated flows, e.g. CPU hotplug or sysfs.
1856  * Otherwise, a user who disables a bank will not be able to re-enable it
1857  * without a system reboot.
1858  */
1859 static void __mcheck_cpu_check_banks(void)
1860 {
1861 	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1862 	u64 msrval;
1863 	int i;
1864 
1865 	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1866 		struct mce_bank *b = &mce_banks[i];
1867 
1868 		if (!b->init)
1869 			continue;
1870 
1871 		rdmsrl(mca_msr_reg(i, MCA_CTL), msrval);
1872 		b->init = !!msrval;
1873 	}
1874 }
1875 
1876 /* Add per CPU specific workarounds here */
1877 static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1878 {
1879 	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1880 	struct mca_config *cfg = &mca_cfg;
1881 
1882 	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
1883 		pr_info("unknown CPU type - not enabling MCE support\n");
1884 		return -EOPNOTSUPP;
1885 	}
1886 
1887 	/* This should be disabled by the BIOS, but isn't always */
1888 	if (c->x86_vendor == X86_VENDOR_AMD) {
1889 		if (c->x86 == 15 && this_cpu_read(mce_num_banks) > 4) {
1890 			/*
1891 			 * Disable GART TBL walk error reporting, which
1892 			 * incorrectly trips with the IOMMU & 3ware &
1893 			 * Cerberus:
1894 			 */
1895 			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
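			/*
			 * E.g. starting from the default ctl of -1ULL,
			 * clearing bit 10 leaves mce_banks[4].ctl ==
			 * 0xfffffffffffffbff.
			 */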
1896 		}
1897 		if (c->x86 < 0x11 && cfg->bootlog < 0) {
1898 			/*
1899 			 * Lots of broken BIOSes around that don't clear MCA
1900 			 * state by default and leave junk in there. Don't log:
1901 			 */
1902 			cfg->bootlog = 0;
1903 		}
1904 		/*
1905 		 * Various K7s around have a broken bank 0. Always disable
1906 		 * it by default.
1907 		 */
1908 		if (c->x86 == 6 && this_cpu_read(mce_num_banks) > 0)
1909 			mce_banks[0].ctl = 0;
1910 
1911 		/*
1912 		 * overflow_recov is supported for F15h Models 00h-0fh
1913 		 * even though we don't have a CPUID bit for it.
1914 		 */
1915 		if (c->x86 == 0x15 && c->x86_model <= 0xf)
1916 			mce_flags.overflow_recov = 1;
1917 
1918 		if (c->x86 >= 0x17 && c->x86 <= 0x1A)
1919 			mce_flags.zen_ifu_quirk = 1;
1920 
1921 	}
1922 
1923 	if (c->x86_vendor == X86_VENDOR_INTEL) {
1924 		/*
1925 		 * The SDM documents that, on family 6, bank 0 should not be
1926 		 * written because it aliases to another special BIOS-controlled
1927 		 * register.
1928 		 * It is no longer aliased on models 0x1a and later.
1929 		 * Don't ignore bank 0 completely because there could be a
1930 		 * valid event later; merely don't write CTL0.
1931 		 */
1932 
1933 		if (c->x86 == 6 && c->x86_model < 0x1A && this_cpu_read(mce_num_banks) > 0)
1934 			mce_banks[0].init = false;
1935 
1936 		/*
1937 		 * All newer Intel systems support MCE broadcasting. Enable
1938 		 * synchronization with a one second timeout.
1939 		 */
1940 		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
1941 			cfg->monarch_timeout < 0)
1942 			cfg->monarch_timeout = USEC_PER_SEC;
1943 
1944 		/*
1945 		 * There are also broken BIOSes on some Pentium M and
1946 		 * earlier systems:
1947 		 */
1948 		if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
1949 			cfg->bootlog = 0;
1950 
1951 		if (c->x86_vfm == INTEL_SANDYBRIDGE_X)
1952 			mce_flags.snb_ifu_quirk = 1;
1953 
1954 		/*
1955 		 * Skylake, Cascade Lake and Cooper Lake require a quirk on
1956 		 * rep movs.
1957 		 */
1958 		if (c->x86_vfm == INTEL_SKYLAKE_X)
1959 			mce_flags.skx_repmov_quirk = 1;
1960 	}
1961 
1962 	if (c->x86_vendor == X86_VENDOR_ZHAOXIN) {
1963 		/*
1964 		 * All newer Zhaoxin CPUs support MCE broadcasting. Enable
1965 		 * synchronization with a one second timeout.
1966 		 */
1967 		if (c->x86 > 6 || (c->x86_model == 0x19 || c->x86_model == 0x1f)) {
1968 			if (cfg->monarch_timeout < 0)
1969 				cfg->monarch_timeout = USEC_PER_SEC;
1970 		}
1971 	}
1972 
1973 	if (cfg->monarch_timeout < 0)
1974 		cfg->monarch_timeout = 0;
1975 	if (cfg->bootlog != 0)
1976 		cfg->panic_timeout = 30;
1977 
1978 	return 0;
1979 }
1980 
1981 static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
1982 {
1983 	if (c->x86 != 5)
1984 		return 0;
1985 
1986 	switch (c->x86_vendor) {
1987 	case X86_VENDOR_INTEL:
1988 		intel_p5_mcheck_init(c);
1989 		mce_flags.p5 = 1;
1990 		return 1;
1991 	case X86_VENDOR_CENTAUR:
1992 		winchip_mcheck_init(c);
1993 		mce_flags.winchip = 1;
1994 		return 1;
1995 	default:
1996 		return 0;
1997 	}
1998 
2001 
2002 /*
2003  * Init basic CPU features needed for early decoding of MCEs.
2004  */
2005 static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
2006 {
2007 	if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) {
2008 		mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
2009 		mce_flags.succor	 = !!cpu_has(c, X86_FEATURE_SUCCOR);
2010 		mce_flags.smca		 = !!cpu_has(c, X86_FEATURE_SMCA);
2011 		mce_flags.amd_threshold	 = 1;
2012 	}
2013 }
2014 
2015 static void mce_centaur_feature_init(struct cpuinfo_x86 *c)
2016 {
2017 	struct mca_config *cfg = &mca_cfg;
2018 
2019 	/*
2020 	 * All newer Centaur CPUs support MCE broadcasting. Enable
2021 	 * synchronization with a one second timeout.
2022 	 */
2023 	if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) ||
2024 	     c->x86 > 6) {
2025 		if (cfg->monarch_timeout < 0)
2026 			cfg->monarch_timeout = USEC_PER_SEC;
2027 	}
2028 }
2029 
2030 static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c)
2031 {
2032 	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
2033 
2034 	/*
2035 	 * These CPUs have MCA bank 8, which reports only one error type,
2036 	 * called SVAD (System View Address Decoder). The reporting of that
2037 	 * error is controlled by IA32_MC8.CTL.0.
2038 	 *
2039 	 * If enabled, prefetching on these CPUs will cause an SVAD MCE when
2040 	 * virtual machines start, resulting in a system panic. Always disable
2041 	 * the bank 8 SVAD error by default.
2042 	 */
2043 	if ((c->x86 == 7 && c->x86_model == 0x1b) ||
2044 	    (c->x86_model == 0x19 || c->x86_model == 0x1f)) {
2045 		if (this_cpu_read(mce_num_banks) > 8)
2046 			mce_banks[8].ctl = 0;
2047 	}
2048 
2049 	intel_init_cmci();
2050 	intel_init_lmce();
2051 }
2052 
2053 static void mce_zhaoxin_feature_clear(struct cpuinfo_x86 *c)
2054 {
2055 	intel_clear_lmce();
2056 }
2057 
2058 static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
2059 {
2060 	switch (c->x86_vendor) {
2061 	case X86_VENDOR_INTEL:
2062 		mce_intel_feature_init(c);
2063 		break;
2064 
2065 	case X86_VENDOR_AMD:
2066 		mce_amd_feature_init(c);
2067 		break;
2069 
2070 	case X86_VENDOR_HYGON:
2071 		mce_hygon_feature_init(c);
2072 		break;
2073 
2074 	case X86_VENDOR_CENTAUR:
2075 		mce_centaur_feature_init(c);
2076 		break;
2077 
2078 	case X86_VENDOR_ZHAOXIN:
2079 		mce_zhaoxin_feature_init(c);
2080 		break;
2081 
2082 	default:
2083 		break;
2084 	}
2085 }
2086 
2087 static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
2088 {
2089 	switch (c->x86_vendor) {
2090 	case X86_VENDOR_INTEL:
2091 		mce_intel_feature_clear(c);
2092 		break;
2093 
2094 	case X86_VENDOR_ZHAOXIN:
2095 		mce_zhaoxin_feature_clear(c);
2096 		break;
2097 
2098 	default:
2099 		break;
2100 	}
2101 }
2102 
2103 static void mce_start_timer(struct timer_list *t)
2104 {
2105 	unsigned long iv = check_interval * HZ;
2106 
2107 	if (mca_cfg.ignore_ce || !iv)
2108 		return;
2109 
2110 	this_cpu_write(mce_next_interval, iv);
2111 	__start_timer(t, iv);
2112 }
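
/*
 * check_interval is in seconds, so the interval above is converted to
 * jiffies. As an illustration, with the default check_interval of 300s
 * (5 minutes) and HZ == 250:
 *
 *	iv = 300 * 250 = 75000 jiffies
 *
 * between polls of the non-CMCI banks on each CPU.
 */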
2113 
2114 static void __mcheck_cpu_setup_timer(void)
2115 {
2116 	struct timer_list *t = this_cpu_ptr(&mce_timer);
2117 
2118 	timer_setup(t, mce_timer_fn, TIMER_PINNED);
2119 }
2120 
2121 static void __mcheck_cpu_init_timer(void)
2122 {
2123 	struct timer_list *t = this_cpu_ptr(&mce_timer);
2124 
2125 	timer_setup(t, mce_timer_fn, TIMER_PINNED);
2126 	mce_start_timer(t);
2127 }
2128 
2129 bool filter_mce(struct mce *m)
2130 {
2131 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
2132 		return amd_filter_mce(m);
2133 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
2134 		return intel_filter_mce(m);
2135 
2136 	return false;
2137 }
2138 
2139 static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
2140 {
2141 	irqentry_state_t irq_state;
2142 
2143 	WARN_ON_ONCE(user_mode(regs));
2144 
2145 	/*
2146 	 * Only required when from kernel mode. See
2147 	 * mce_check_crashing_cpu() for details.
2148 	 */
2149 	if (mca_cfg.initialized && mce_check_crashing_cpu())
2150 		return;
2151 
2152 	irq_state = irqentry_nmi_enter(regs);
2153 
2154 	do_machine_check(regs);
2155 
2156 	irqentry_nmi_exit(regs, irq_state);
2157 }
2158 
2159 static __always_inline void exc_machine_check_user(struct pt_regs *regs)
2160 {
2161 	irqentry_enter_from_user_mode(regs);
2162 
2163 	do_machine_check(regs);
2164 
2165 	irqentry_exit_to_user_mode(regs);
2166 }
2167 
2168 #ifdef CONFIG_X86_64
2169 /* MCE hit kernel mode */
2170 DEFINE_IDTENTRY_MCE(exc_machine_check)
2171 {
2172 	unsigned long dr7;
2173 
2174 	dr7 = local_db_save();
2175 	exc_machine_check_kernel(regs);
2176 	local_db_restore(dr7);
2177 }
2178 
2179 /* The user mode variant. */
2180 DEFINE_IDTENTRY_MCE_USER(exc_machine_check)
2181 {
2182 	unsigned long dr7;
2183 
2184 	dr7 = local_db_save();
2185 	exc_machine_check_user(regs);
2186 	local_db_restore(dr7);
2187 }
2188 
2189 #ifdef CONFIG_X86_FRED
2190 /*
2191  * Depending on the ring level it occurred at, i.e., user or kernel
2192  * context, a #MCE needs to be handled on a different stack: a user
2193  * #MCE on the current task stack, a kernel #MCE on a dedicated stack.
2194  *
2195  * This is exactly how FRED event delivery invokes an exception
2196  * handler: ring 3 event on level 0 stack, i.e., current task stack;
2197  * ring 0 event on the #MCE dedicated stack specified in the
2198  * IA32_FRED_STKLVLS MSR. So unlike IDT, the FRED machine check entry
2199  * stub doesn't need to do a stack switch.
2200  */
2201 DEFINE_FREDENTRY_MCE(exc_machine_check)
2202 {
2203 	unsigned long dr7;
2204 
2205 	dr7 = local_db_save();
2206 	if (user_mode(regs))
2207 		exc_machine_check_user(regs);
2208 	else
2209 		exc_machine_check_kernel(regs);
2210 	local_db_restore(dr7);
2211 }
2212 #endif
2213 #else
2214 /* 32bit unified entry point */
2215 DEFINE_IDTENTRY_RAW(exc_machine_check)
2216 {
2217 	unsigned long dr7;
2218 
2219 	dr7 = local_db_save();
2220 	if (user_mode(regs))
2221 		exc_machine_check_user(regs);
2222 	else
2223 		exc_machine_check_kernel(regs);
2224 	local_db_restore(dr7);
2225 }
2226 #endif
2227 
2228 /*
2229  * Called for each booted CPU to set up machine checks.
2230  * Must be called with preempt off:
2231  */
2232 void mcheck_cpu_init(struct cpuinfo_x86 *c)
2233 {
2234 	if (mca_cfg.disabled)
2235 		return;
2236 
2237 	if (__mcheck_cpu_ancient_init(c))
2238 		return;
2239 
2240 	if (!mce_available(c))
2241 		return;
2242 
2243 	__mcheck_cpu_cap_init();
2244 
2245 	if (__mcheck_cpu_apply_quirks(c) < 0) {
2246 		mca_cfg.disabled = 1;
2247 		return;
2248 	}
2249 
2250 	if (mce_gen_pool_init()) {
2251 		mca_cfg.disabled = 1;
2252 		pr_emerg("Couldn't allocate MCE records pool!\n");
2253 		return;
2254 	}
2255 
2256 	mca_cfg.initialized = 1;
2257 
2258 	__mcheck_cpu_init_early(c);
2259 	__mcheck_cpu_init_generic();
2260 	__mcheck_cpu_init_vendor(c);
2261 	__mcheck_cpu_init_clear_banks();
2262 	__mcheck_cpu_check_banks();
2263 	__mcheck_cpu_setup_timer();
2264 }
2265 
2266 /*
2267  * Called for each booted CPU to clear some machine check opt-ins.
2268  */
2269 void mcheck_cpu_clear(struct cpuinfo_x86 *c)
2270 {
2271 	if (mca_cfg.disabled)
2272 		return;
2273 
2274 	if (!mce_available(c))
2275 		return;
2276 
2277 	/*
2278 	 * A place to possibly clear settings generic to x86:
2279 	 * __mcheck_cpu_clear_generic(c);
2280 	 */
2281 	__mcheck_cpu_clear_vendor(c);
2283 }
2284 
2285 static void __mce_disable_bank(void *arg)
2286 {
2287 	int bank = *((int *)arg);
2288 	__clear_bit(bank, this_cpu_ptr(mce_poll_banks));
2289 	cmci_disable_bank(bank);
2290 }
2291 
2292 void mce_disable_bank(int bank)
2293 {
2294 	if (bank >= this_cpu_read(mce_num_banks)) {
2295 		pr_warn(FW_BUG
2296 			"Ignoring request to disable invalid MCA bank %d.\n",
2297 			bank);
2298 		return;
2299 	}
2300 	set_bit(bank, mce_banks_ce_disabled);
2301 	on_each_cpu(__mce_disable_bank, &bank, 1);
2302 }
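
/*
 * Sketch of a typical caller: the ACPI APEI code disables corrected-error
 * handling for banks that firmware claims via HEST "firmware first" mode,
 * roughly (the helper below is hypothetical):
 *
 *	if (hest_bank_is_firmware_first(bank))	// hypothetical helper
 *		mce_disable_bank(bank);
 *
 * GHES then reports corrected errors for that bank instead of CMCI/polling.
 */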
2303 
2304 /*
2305  * mce=off Disables machine check
2306  * mce=no_cmci Disables CMCI
2307  * mce=no_lmce Disables LMCE
2308  * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
2309  * mce=print_all Print all machine check logs to console
2310  * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
2311  * mce=monarchtimeout (number)
2312  *	monarchtimeout is how long, in microseconds, to wait for other
2313  *	CPUs on a machine check, or 0 to not wait
2314  * mce=bootlog Log MCEs from before booting. Disabled by default on AMD
2315  *	Fam10h and older.
2316  * mce=nobootlog Don't log MCEs from before booting.
2317  * mce=bios_cmci_threshold Don't program the CMCI threshold
2318  * mce=recovery force enable copy_mc_fragile()
2319  */
2320 static int __init mcheck_enable(char *str)
2321 {
2322 	struct mca_config *cfg = &mca_cfg;
2323 
2324 	if (*str == 0) {
2325 		enable_p5_mce();
2326 		return 1;
2327 	}
2328 	if (*str == '=')
2329 		str++;
2330 	if (!strcmp(str, "off"))
2331 		cfg->disabled = 1;
2332 	else if (!strcmp(str, "no_cmci"))
2333 		cfg->cmci_disabled = true;
2334 	else if (!strcmp(str, "no_lmce"))
2335 		cfg->lmce_disabled = 1;
2336 	else if (!strcmp(str, "dont_log_ce"))
2337 		cfg->dont_log_ce = true;
2338 	else if (!strcmp(str, "print_all"))
2339 		cfg->print_all = true;
2340 	else if (!strcmp(str, "ignore_ce"))
2341 		cfg->ignore_ce = true;
2342 	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
2343 		cfg->bootlog = (str[0] == 'b');
2344 	else if (!strcmp(str, "bios_cmci_threshold"))
2345 		cfg->bios_cmci_threshold = 1;
2346 	else if (!strcmp(str, "recovery"))
2347 		cfg->recovery = 1;
2348 	else if (isdigit(str[0]))
2349 		get_option(&str, &(cfg->monarch_timeout));
2350 	else {
2351 		pr_info("mce argument %s ignored. Please use /sys\n", str);
2352 		return 0;
2353 	}
2354 	return 1;
2355 }
2356 __setup("mce", mcheck_enable);
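
/*
 * Illustrative command lines (each option needs its own mce= instance,
 * since the argument string is matched as a whole):
 *
 *	mce=off				no machine check handling at all
 *	mce=no_cmci mce=dont_log_ce	poll for corrected errors instead of
 *					using CMCI, and don't log them
 *	mce=500000			wait up to 500000 us (0.5s) for other
 *					CPUs on a machine check
 */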
2357 
2358 int __init mcheck_init(void)
2359 {
2360 	mce_register_decode_chain(&early_nb);
2361 	mce_register_decode_chain(&mce_uc_nb);
2362 	mce_register_decode_chain(&mce_default_nb);
2363 
2364 	INIT_WORK(&mce_work, mce_gen_pool_process);
2365 	init_irq_work(&mce_irq_work, mce_irq_work_cb);
2366 
2367 	return 0;
2368 }
2369 
2370 /*
2371  * mce_syscore: PM support
2372  */
2373 
2374 /*
2375  * Disable machine checks on suspend and shutdown. We can't really handle
2376  * them later.
2377  */
2378 static void mce_disable_error_reporting(void)
2379 {
2380 	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
2381 	int i;
2382 
2383 	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
2384 		struct mce_bank *b = &mce_banks[i];
2385 
2386 		if (b->init)
2387 			wrmsrl(mca_msr_reg(i, MCA_CTL), 0);
2388 	}
2390 }
2391 
2392 static void vendor_disable_error_reporting(void)
2393 {
2394 	/*
2395 	 * Don't clear on Intel or AMD or Hygon or Zhaoxin CPUs. Some of these
2396 	 * MSRs are socket-wide. Disabling them for just a single offlined CPU
2397 	 * is bad, since it will inhibit reporting for all shared resources on
2398 	 * the socket like the last level cache (LLC), the integrated memory
2399 	 * controller (iMC), etc.
2400 	 */
2401 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
2402 	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ||
2403 	    boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
2404 	    boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN)
2405 		return;
2406 
2407 	mce_disable_error_reporting();
2408 }
2409 
2410 static int mce_syscore_suspend(void)
2411 {
2412 	vendor_disable_error_reporting();
2413 	return 0;
2414 }
2415 
2416 static void mce_syscore_shutdown(void)
2417 {
2418 	vendor_disable_error_reporting();
2419 }
2420 
2421 /*
2422  * On resume, clear all MCE state. We don't want to see leftovers from the
2423  * BIOS. Only one CPU is active at this time; the others get re-added later
2424  * using CPU hotplug:
2425  */
2426 static void mce_syscore_resume(void)
2427 {
2428 	__mcheck_cpu_init_generic();
2429 	__mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
2430 	__mcheck_cpu_init_clear_banks();
2431 }
2432 
2433 static struct syscore_ops mce_syscore_ops = {
2434 	.suspend	= mce_syscore_suspend,
2435 	.shutdown	= mce_syscore_shutdown,
2436 	.resume		= mce_syscore_resume,
2437 };
2438 
2439 /*
2440  * mce_device: Sysfs support
2441  */
2442 
2443 static void mce_cpu_restart(void *data)
2444 {
2445 	if (!mce_available(raw_cpu_ptr(&cpu_info)))
2446 		return;
2447 	__mcheck_cpu_init_generic();
2448 	__mcheck_cpu_init_clear_banks();
2449 	__mcheck_cpu_init_timer();
2450 }
2451 
2452 /* Reinit MCEs after user configuration changes */
2453 static void mce_restart(void)
2454 {
2455 	mce_timer_delete_all();
2456 	on_each_cpu(mce_cpu_restart, NULL, 1);
2457 	mce_schedule_work();
2458 }
2459 
2460 /* Toggle features for corrected errors */
2461 static void mce_disable_cmci(void *data)
2462 {
2463 	if (!mce_available(raw_cpu_ptr(&cpu_info)))
2464 		return;
2465 	cmci_clear();
2466 }
2467 
2468 static void mce_enable_ce(void *all)
2469 {
2470 	if (!mce_available(raw_cpu_ptr(&cpu_info)))
2471 		return;
2472 	cmci_reenable();
2473 	cmci_recheck();
2474 	if (all)
2475 		__mcheck_cpu_init_timer();
2476 }
2477 
2478 static const struct bus_type mce_subsys = {
2479 	.name		= "machinecheck",
2480 	.dev_name	= "machinecheck",
2481 };
2482 
2483 DEFINE_PER_CPU(struct device *, mce_device);
2484 
2485 static inline struct mce_bank_dev *attr_to_bank(struct device_attribute *attr)
2486 {
2487 	return container_of(attr, struct mce_bank_dev, attr);
2488 }
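
/*
 * container_of() recovers the per-bank object from the embedded attribute.
 * Illustrative layout:
 *
 *	struct mce_bank_dev {
 *		struct device_attribute attr;	<- attr points here
 *		char attrname[ATTR_LEN];
 *		u8 bank;			<- attr_to_bank(attr)->bank
 *	};
 *
 * i.e. the result is attr minus offsetof(struct mce_bank_dev, attr), which
 * equals attr itself here because attr is the first member.
 */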
2489 
2490 static ssize_t show_bank(struct device *s, struct device_attribute *attr,
2491 			 char *buf)
2492 {
2493 	u8 bank = attr_to_bank(attr)->bank;
2494 	struct mce_bank *b;
2495 
2496 	if (bank >= per_cpu(mce_num_banks, s->id))
2497 		return -EINVAL;
2498 
2499 	b = &per_cpu(mce_banks_array, s->id)[bank];
2500 
2501 	if (!b->init)
2502 		return -ENODEV;
2503 
2504 	return sprintf(buf, "%llx\n", b->ctl);
2505 }
2506 
2507 static ssize_t set_bank(struct device *s, struct device_attribute *attr,
2508 			const char *buf, size_t size)
2509 {
2510 	u8 bank = attr_to_bank(attr)->bank;
2511 	struct mce_bank *b;
2512 	u64 new;
2513 
2514 	if (kstrtou64(buf, 0, &new) < 0)
2515 		return -EINVAL;
2516 
2517 	if (bank >= per_cpu(mce_num_banks, s->id))
2518 		return -EINVAL;
2519 
2520 	b = &per_cpu(mce_banks_array, s->id)[bank];
2521 	if (!b->init)
2522 		return -ENODEV;
2523 
2524 	b->ctl = new;
2525 
2526 	mutex_lock(&mce_sysfs_mutex);
2527 	mce_restart();
2528 	mutex_unlock(&mce_sysfs_mutex);
2529 
2530 	return size;
2531 }
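
/*
 * Illustrative use from user space (the bank files appear per logical CPU
 * and index into that CPU's bank array):
 *
 *	# cat /sys/devices/system/machinecheck/machinecheck0/bank2
 *	ffffffffffffffff
 *	# echo 0 > /sys/devices/system/machinecheck/machinecheck0/bank2
 *
 * The write updates CPU0's bank 2 control value and kicks mce_restart() so
 * the hardware is reprogrammed with it.
 */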
2532 
2533 static ssize_t set_ignore_ce(struct device *s,
2534 			     struct device_attribute *attr,
2535 			     const char *buf, size_t size)
2536 {
2537 	u64 new;
2538 
2539 	if (kstrtou64(buf, 0, &new) < 0)
2540 		return -EINVAL;
2541 
2542 	mutex_lock(&mce_sysfs_mutex);
2543 	if (mca_cfg.ignore_ce ^ !!new) {
2544 		if (new) {
2545 			/* disable ce features */
2546 			mce_timer_delete_all();
2547 			on_each_cpu(mce_disable_cmci, NULL, 1);
2548 			mca_cfg.ignore_ce = true;
2549 		} else {
2550 			/* enable ce features */
2551 			mca_cfg.ignore_ce = false;
2552 			on_each_cpu(mce_enable_ce, (void *)1, 1);
2553 		}
2554 	}
2555 	mutex_unlock(&mce_sysfs_mutex);
2556 
2557 	return size;
2558 }
2559 
2560 static ssize_t set_cmci_disabled(struct device *s,
2561 				 struct device_attribute *attr,
2562 				 const char *buf, size_t size)
2563 {
2564 	u64 new;
2565 
2566 	if (kstrtou64(buf, 0, &new) < 0)
2567 		return -EINVAL;
2568 
2569 	mutex_lock(&mce_sysfs_mutex);
2570 	if (mca_cfg.cmci_disabled ^ !!new) {
2571 		if (new) {
2572 			/* disable cmci */
2573 			on_each_cpu(mce_disable_cmci, NULL, 1);
2574 			mca_cfg.cmci_disabled = true;
2575 		} else {
2576 			/* enable cmci */
2577 			mca_cfg.cmci_disabled = false;
2578 			on_each_cpu(mce_enable_ce, NULL, 1);
2579 		}
2580 	}
2581 	mutex_unlock(&mce_sysfs_mutex);
2582 
2583 	return size;
2584 }
2585 
2586 static ssize_t store_int_with_restart(struct device *s,
2587 				      struct device_attribute *attr,
2588 				      const char *buf, size_t size)
2589 {
2590 	unsigned long old_check_interval = check_interval;
2591 	ssize_t ret = device_store_ulong(s, attr, buf, size);
2592 
2593 	if (check_interval == old_check_interval)
2594 		return ret;
2595 
2596 	mutex_lock(&mce_sysfs_mutex);
2597 	mce_restart();
2598 	mutex_unlock(&mce_sysfs_mutex);
2599 
2600 	return ret;
2601 }
2602 
2603 static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
2604 static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
2605 static DEVICE_BOOL_ATTR(print_all, 0644, mca_cfg.print_all);
2606 
2607 static struct dev_ext_attribute dev_attr_check_interval = {
2608 	__ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
2609 	&check_interval
2610 };
2611 
2612 static struct dev_ext_attribute dev_attr_ignore_ce = {
2613 	__ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
2614 	&mca_cfg.ignore_ce
2615 };
2616 
2617 static struct dev_ext_attribute dev_attr_cmci_disabled = {
2618 	__ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
2619 	&mca_cfg.cmci_disabled
2620 };
2621 
2622 static struct device_attribute *mce_device_attrs[] = {
2623 	&dev_attr_check_interval.attr,
2624 #ifdef CONFIG_X86_MCELOG_LEGACY
2625 	&dev_attr_trigger,
2626 #endif
2627 	&dev_attr_monarch_timeout.attr,
2628 	&dev_attr_dont_log_ce.attr,
2629 	&dev_attr_print_all.attr,
2630 	&dev_attr_ignore_ce.attr,
2631 	&dev_attr_cmci_disabled.attr,
2632 	NULL
2633 };
2634 
2635 static cpumask_var_t mce_device_initialized;
2636 
2637 static void mce_device_release(struct device *dev)
2638 {
2639 	kfree(dev);
2640 }
2641 
2642 /* Per CPU device init. All of the CPUs still share the same bank device: */
2643 static int mce_device_create(unsigned int cpu)
2644 {
2645 	struct device *dev;
2646 	int err;
2647 	int i, j;
2648 
2649 	dev = per_cpu(mce_device, cpu);
2650 	if (dev)
2651 		return 0;
2652 
2653 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2654 	if (!dev)
2655 		return -ENOMEM;
2656 	dev->id  = cpu;
2657 	dev->bus = &mce_subsys;
2658 	dev->release = &mce_device_release;
2659 
2660 	err = device_register(dev);
2661 	if (err) {
2662 		put_device(dev);
2663 		return err;
2664 	}
2665 
2666 	for (i = 0; mce_device_attrs[i]; i++) {
2667 		err = device_create_file(dev, mce_device_attrs[i]);
2668 		if (err)
2669 			goto error;
2670 	}
2671 	for (j = 0; j < per_cpu(mce_num_banks, cpu); j++) {
2672 		err = device_create_file(dev, &mce_bank_devs[j].attr);
2673 		if (err)
2674 			goto error2;
2675 	}
2676 	cpumask_set_cpu(cpu, mce_device_initialized);
2677 	per_cpu(mce_device, cpu) = dev;
2678 
2679 	return 0;
2680 error2:
2681 	while (--j >= 0)
2682 		device_remove_file(dev, &mce_bank_devs[j].attr);
2683 error:
2684 	while (--i >= 0)
2685 		device_remove_file(dev, mce_device_attrs[i]);
2686 
2687 	device_unregister(dev);
2688 
2689 	return err;
2690 }
2691 
2692 static void mce_device_remove(unsigned int cpu)
2693 {
2694 	struct device *dev = per_cpu(mce_device, cpu);
2695 	int i;
2696 
2697 	if (!cpumask_test_cpu(cpu, mce_device_initialized))
2698 		return;
2699 
2700 	for (i = 0; mce_device_attrs[i]; i++)
2701 		device_remove_file(dev, mce_device_attrs[i]);
2702 
2703 	for (i = 0; i < per_cpu(mce_num_banks, cpu); i++)
2704 		device_remove_file(dev, &mce_bank_devs[i].attr);
2705 
2706 	device_unregister(dev);
2707 	cpumask_clear_cpu(cpu, mce_device_initialized);
2708 	per_cpu(mce_device, cpu) = NULL;
2709 }
2710 
2711 /* Make sure there are no machine checks on offlined CPUs. */
2712 static void mce_disable_cpu(void)
2713 {
2714 	if (!mce_available(raw_cpu_ptr(&cpu_info)))
2715 		return;
2716 
2717 	if (!cpuhp_tasks_frozen)
2718 		cmci_clear();
2719 
2720 	vendor_disable_error_reporting();
2721 }
2722 
2723 static void mce_reenable_cpu(void)
2724 {
2725 	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
2726 	int i;
2727 
2728 	if (!mce_available(raw_cpu_ptr(&cpu_info)))
2729 		return;
2730 
2731 	if (!cpuhp_tasks_frozen)
2732 		cmci_reenable();
2733 	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
2734 		struct mce_bank *b = &mce_banks[i];
2735 
2736 		if (b->init)
2737 			wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl);
2738 	}
2739 }
2740 
2741 static int mce_cpu_dead(unsigned int cpu)
2742 {
2743 	/* intentionally ignoring frozen here */
2744 	if (!cpuhp_tasks_frozen)
2745 		cmci_rediscover();
2746 	return 0;
2747 }
2748 
2749 static int mce_cpu_online(unsigned int cpu)
2750 {
2751 	struct timer_list *t = this_cpu_ptr(&mce_timer);
2752 	int ret;
2753 
2754 	mce_device_create(cpu);
2755 
2756 	ret = mce_threshold_create_device(cpu);
2757 	if (ret) {
2758 		mce_device_remove(cpu);
2759 		return ret;
2760 	}
2761 	mce_reenable_cpu();
2762 	mce_start_timer(t);
2763 	return 0;
2764 }
2765 
2766 static int mce_cpu_pre_down(unsigned int cpu)
2767 {
2768 	struct timer_list *t = this_cpu_ptr(&mce_timer);
2769 
2770 	mce_disable_cpu();
2771 	del_timer_sync(t);
2772 	mce_threshold_remove_device(cpu);
2773 	mce_device_remove(cpu);
2774 	return 0;
2775 }
2776 
2777 static __init void mce_init_banks(void)
2778 {
2779 	int i;
2780 
2781 	for (i = 0; i < MAX_NR_BANKS; i++) {
2782 		struct mce_bank_dev *b = &mce_bank_devs[i];
2783 		struct device_attribute *a = &b->attr;
2784 
2785 		b->bank = i;
2786 
2787 		sysfs_attr_init(&a->attr);
2788 		a->attr.name	= b->attrname;
2789 		snprintf(b->attrname, ATTR_LEN, "bank%d", i);
2790 
2791 		a->attr.mode	= 0644;
2792 		a->show		= show_bank;
2793 		a->store	= set_bank;
2794 	}
2795 }
2796 
2797 /*
2798  * When running on XEN, this initcall is ordered against the XEN mcelog
2799  * initcall:
2800  *
2801  *   device_initcall(xen_late_init_mcelog);
2802  *   device_initcall_sync(mcheck_init_device);
2803  */
2804 static __init int mcheck_init_device(void)
2805 {
2806 	int err;
2807 
2808 	/*
2809 	 * Check if we have a spare virtual bit. This will only become
2810 	 * a problem if/when we move beyond 5-level page tables.
2811 	 */
2812 	MAYBE_BUILD_BUG_ON(__VIRTUAL_MASK_SHIFT >= 63);
2813 
2814 	if (!mce_available(&boot_cpu_data)) {
2815 		err = -EIO;
2816 		goto err_out;
2817 	}
2818 
2819 	if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
2820 		err = -ENOMEM;
2821 		goto err_out;
2822 	}
2823 
2824 	mce_init_banks();
2825 
2826 	err = subsys_system_register(&mce_subsys, NULL);
2827 	if (err)
2828 		goto err_out_mem;
2829 
2830 	err = cpuhp_setup_state(CPUHP_X86_MCE_DEAD, "x86/mce:dead", NULL,
2831 				mce_cpu_dead);
2832 	if (err)
2833 		goto err_out_mem;
2834 
2835 	/*
2836 	 * Invokes mce_cpu_online() on all CPUs which are online when
2837 	 * the state is installed.
2838 	 */
2839 	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online",
2840 				mce_cpu_online, mce_cpu_pre_down);
2841 	if (err < 0)
2842 		goto err_out_online;
2843 
2844 	register_syscore_ops(&mce_syscore_ops);
2845 
2846 	return 0;
2847 
2848 err_out_online:
2849 	cpuhp_remove_state(CPUHP_X86_MCE_DEAD);
2850 
2851 err_out_mem:
2852 	free_cpumask_var(mce_device_initialized);
2853 
2854 err_out:
2855 	pr_err("Unable to init MCE device (rc: %d)\n", err);
2856 
2857 	return err;
2858 }
2859 device_initcall_sync(mcheck_init_device);
2860 
2861 /*
2862  * Old style boot options parsing. Only for compatibility.
2863  */
2864 static int __init mcheck_disable(char *str)
2865 {
2866 	mca_cfg.disabled = 1;
2867 	return 1;
2868 }
2869 __setup("nomce", mcheck_disable);
2870 
2871 #ifdef CONFIG_DEBUG_FS
2872 struct dentry *mce_get_debugfs_dir(void)
2873 {
2874 	static struct dentry *dmce;
2875 
2876 	if (!dmce)
2877 		dmce = debugfs_create_dir("mce", NULL);
2878 
2879 	return dmce;
2880 }
2881 
2882 static void mce_reset(void)
2883 {
2884 	atomic_set(&mce_fake_panicked, 0);
2885 	atomic_set(&mce_executing, 0);
2886 	atomic_set(&mce_callin, 0);
2887 	atomic_set(&global_nwo, 0);
2888 	cpumask_setall(&mce_missing_cpus);
2889 }
2890 
2891 static int fake_panic_get(void *data, u64 *val)
2892 {
2893 	*val = fake_panic;
2894 	return 0;
2895 }
2896 
2897 static int fake_panic_set(void *data, u64 val)
2898 {
2899 	mce_reset();
2900 	fake_panic = val;
2901 	return 0;
2902 }
2903 
2904 DEFINE_DEBUGFS_ATTRIBUTE(fake_panic_fops, fake_panic_get, fake_panic_set,
2905 			 "%llu\n");
2906 
2907 static void __init mcheck_debugfs_init(void)
2908 {
2909 	struct dentry *dmce;
2910 
2911 	dmce = mce_get_debugfs_dir();
2912 	debugfs_create_file_unsafe("fake_panic", 0444, dmce, NULL,
2913 				   &fake_panic_fops);
2914 }
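
/*
 * Illustrative use (as root, with CONFIG_DEBUG_FS and debugfs mounted):
 *
 *	# echo 1 > /sys/kernel/debug/mce/fake_panic
 *
 * With fake_panic set, mce_panic() only prints the records instead of
 * panicking, which helps when testing the MCE paths via error injection.
 */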
2915 #else
2916 static void __init mcheck_debugfs_init(void) { }
2917 #endif
2918 
2919 static int __init mcheck_late_init(void)
2920 {
2921 	if (mca_cfg.recovery)
2922 		enable_copy_mc_fragile();
2923 
2924 	mcheck_debugfs_init();
2925 
2926 	/*
2927 	 * Flush out everything that has been logged during early boot, now that
2928 	 * everything has been initialized (workqueues, decoders, ...).
2929 	 */
2930 	mce_schedule_work();
2931 
2932 	return 0;
2933 }
2934 late_initcall(mcheck_late_init);
2935