xref: /linux/arch/x86/kernel/cpu/mce/core.c (revision 6fdcba32711044c35c0e1b094cbd8f3f0b4472c9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Machine check handler.
4  *
5  * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
6  * Rest from unknown author(s).
7  * 2004 Andi Kleen. Rewrote most of it.
8  * Copyright 2008 Intel Corporation
9  * Author: Andi Kleen
10  */
11 
12 #include <linux/thread_info.h>
13 #include <linux/capability.h>
14 #include <linux/miscdevice.h>
15 #include <linux/ratelimit.h>
16 #include <linux/rcupdate.h>
17 #include <linux/kobject.h>
18 #include <linux/uaccess.h>
19 #include <linux/kdebug.h>
20 #include <linux/kernel.h>
21 #include <linux/percpu.h>
22 #include <linux/string.h>
23 #include <linux/device.h>
24 #include <linux/syscore_ops.h>
25 #include <linux/delay.h>
26 #include <linux/ctype.h>
27 #include <linux/sched.h>
28 #include <linux/sysfs.h>
29 #include <linux/types.h>
30 #include <linux/slab.h>
31 #include <linux/init.h>
32 #include <linux/kmod.h>
33 #include <linux/poll.h>
34 #include <linux/nmi.h>
35 #include <linux/cpu.h>
36 #include <linux/ras.h>
37 #include <linux/smp.h>
38 #include <linux/fs.h>
39 #include <linux/mm.h>
40 #include <linux/debugfs.h>
41 #include <linux/irq_work.h>
42 #include <linux/export.h>
43 #include <linux/jump_label.h>
44 #include <linux/set_memory.h>
45 
46 #include <asm/intel-family.h>
47 #include <asm/processor.h>
48 #include <asm/traps.h>
49 #include <asm/tlbflush.h>
50 #include <asm/mce.h>
51 #include <asm/msr.h>
52 #include <asm/reboot.h>
53 
54 #include "internal.h"
55 
56 static DEFINE_MUTEX(mce_log_mutex);
57 
58 /* sysfs synchronization */
59 static DEFINE_MUTEX(mce_sysfs_mutex);
60 
61 #define CREATE_TRACE_POINTS
62 #include <trace/events/mce.h>
63 
64 #define SPINUNIT		100	/* 100ns */
65 
66 DEFINE_PER_CPU(unsigned, mce_exception_count);
67 
68 DEFINE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);
69 
70 struct mce_bank {
71 	u64			ctl;			/* subevents to enable */
72 	bool			init;			/* initialise bank? */
73 };
74 static DEFINE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array);
75 
76 #define ATTR_LEN               16
77 /* One object for each MCE bank, shared by all CPUs */
78 struct mce_bank_dev {
79 	struct device_attribute	attr;			/* device attribute */
80 	char			attrname[ATTR_LEN];	/* attribute name */
81 	u8			bank;			/* bank number */
82 };
83 static struct mce_bank_dev mce_bank_devs[MAX_NR_BANKS];
84 
85 struct mce_vendor_flags mce_flags __read_mostly;
86 
87 struct mca_config mca_cfg __read_mostly = {
88 	.bootlog  = -1,
89 	/*
90 	 * Tolerant levels:
91 	 * 0: always panic on uncorrected errors, log corrected errors
92 	 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
93 	 * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
94 	 * 3: never panic or SIGBUS, log all errors (for testing only)
95 	 */
96 	.tolerant = 1,
97 	.monarch_timeout = -1
98 };
99 
100 static DEFINE_PER_CPU(struct mce, mces_seen);
101 static unsigned long mce_need_notify;
102 static int cpu_missing;
103 
104 /*
105  * MCA banks polled by the period polling timer for corrected events.
106  * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
107  */
108 DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
109 	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
110 };
111 
112 /*
113  * MCA banks controlled through firmware first for corrected errors.
114  * This is a global list of banks for which we won't enable CMCI and we
115  * won't poll. Firmware controls these banks and is responsible for
116  * reporting corrected errors through GHES. Uncorrected/recoverable
117  * errors are still notified through a machine check.
118  */
119 mce_banks_t mce_banks_ce_disabled;
120 
121 static struct work_struct mce_work;
122 static struct irq_work mce_irq_work;
123 
124 static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
125 
126 /*
127  * CPU/chipset specific EDAC code can register a notifier call here to print
128  * MCE errors in a human-readable form.
129  */
130 BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);
131 
132 /* Do initial initialization of a struct mce */
133 void mce_setup(struct mce *m)
134 {
135 	memset(m, 0, sizeof(struct mce));
136 	m->cpu = m->extcpu = smp_processor_id();
137 	/* need the internal __ version to avoid deadlocks */
138 	m->time = __ktime_get_real_seconds();
139 	m->cpuvendor = boot_cpu_data.x86_vendor;
140 	m->cpuid = cpuid_eax(1);
141 	m->socketid = cpu_data(m->extcpu).phys_proc_id;
142 	m->apicid = cpu_data(m->extcpu).initial_apicid;
143 	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
144 
145 	if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
146 		rdmsrl(MSR_PPIN, m->ppin);
147 
148 	m->microcode = boot_cpu_data.microcode;
149 }
150 
151 DEFINE_PER_CPU(struct mce, injectm);
152 EXPORT_PER_CPU_SYMBOL_GPL(injectm);
153 
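/*
 * Queue an MCE record in the genpool and, if that succeeds, kick irq_work so
 * the decoder notifier chain is processed from a safe (non-#MC) context.
 */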
154 void mce_log(struct mce *m)
155 {
156 	if (!mce_gen_pool_add(m))
157 		irq_work_queue(&mce_irq_work);
158 }
159 
160 void mce_inject_log(struct mce *m)
161 {
162 	mutex_lock(&mce_log_mutex);
163 	mce_log(m);
164 	mutex_unlock(&mce_log_mutex);
165 }
166 EXPORT_SYMBOL_GPL(mce_inject_log);
167 
168 static struct notifier_block mce_srao_nb;
169 
170 /*
171  * We run the default notifier only if the SRAO, the first and the default
172  * notifiers are the only ones registered, i.e. only the mandatory
173  * NUM_DEFAULT_NOTIFIERS notifiers are on the chain.
174  */
175 #define NUM_DEFAULT_NOTIFIERS	3
176 static atomic_t num_notifiers;
177 
178 void mce_register_decode_chain(struct notifier_block *nb)
179 {
180 	if (WARN_ON(nb->priority > MCE_PRIO_MCELOG && nb->priority < MCE_PRIO_EDAC))
181 		return;
182 
183 	atomic_inc(&num_notifiers);
184 
185 	blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
186 }
187 EXPORT_SYMBOL_GPL(mce_register_decode_chain);
188 
189 void mce_unregister_decode_chain(struct notifier_block *nb)
190 {
191 	atomic_dec(&num_notifiers);
192 
193 	blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
194 }
195 EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
196 
197 static inline u32 ctl_reg(int bank)
198 {
199 	return MSR_IA32_MCx_CTL(bank);
200 }
201 
202 static inline u32 status_reg(int bank)
203 {
204 	return MSR_IA32_MCx_STATUS(bank);
205 }
206 
207 static inline u32 addr_reg(int bank)
208 {
209 	return MSR_IA32_MCx_ADDR(bank);
210 }
211 
212 static inline u32 misc_reg(int bank)
213 {
214 	return MSR_IA32_MCx_MISC(bank);
215 }
216 
217 static inline u32 smca_ctl_reg(int bank)
218 {
219 	return MSR_AMD64_SMCA_MCx_CTL(bank);
220 }
221 
222 static inline u32 smca_status_reg(int bank)
223 {
224 	return MSR_AMD64_SMCA_MCx_STATUS(bank);
225 }
226 
227 static inline u32 smca_addr_reg(int bank)
228 {
229 	return MSR_AMD64_SMCA_MCx_ADDR(bank);
230 }
231 
232 static inline u32 smca_misc_reg(int bank)
233 {
234 	return MSR_AMD64_SMCA_MCx_MISC(bank);
235 }
236 
237 struct mca_msr_regs msr_ops = {
238 	.ctl	= ctl_reg,
239 	.status	= status_reg,
240 	.addr	= addr_reg,
241 	.misc	= misc_reg
242 };
243 
244 static void __print_mce(struct mce *m)
245 {
246 	pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n",
247 		 m->extcpu,
248 		 (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""),
249 		 m->mcgstatus, m->bank, m->status);
250 
251 	if (m->ip) {
252 		pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
253 			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
254 			m->cs, m->ip);
255 
256 		if (m->cs == __KERNEL_CS)
257 			pr_cont("{%pS}", (void *)(unsigned long)m->ip);
258 		pr_cont("\n");
259 	}
260 
261 	pr_emerg(HW_ERR "TSC %llx ", m->tsc);
262 	if (m->addr)
263 		pr_cont("ADDR %llx ", m->addr);
264 	if (m->misc)
265 		pr_cont("MISC %llx ", m->misc);
266 
267 	if (mce_flags.smca) {
268 		if (m->synd)
269 			pr_cont("SYND %llx ", m->synd);
270 		if (m->ipid)
271 			pr_cont("IPID %llx ", m->ipid);
272 	}
273 
274 	pr_cont("\n");
275 	/*
276 	 * Note this output is parsed by external tools and old fields
277 	 * should not be changed.
278 	 */
279 	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
280 		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
281 		m->microcode);
282 }
283 
284 static void print_mce(struct mce *m)
285 {
286 	__print_mce(m);
287 
288 	if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON)
289 		pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
290 }
291 
292 #define PANIC_TIMEOUT 5 /* 5 seconds */
293 
294 static atomic_t mce_panicked;
295 
296 static int fake_panic;
297 static atomic_t mce_fake_panicked;
298 
299 /* Panic in progress. Enable interrupts and wait for final IPI */
300 static void wait_for_panic(void)
301 {
302 	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;
303 
304 	preempt_disable();
305 	local_irq_enable();
306 	while (timeout-- > 0)
307 		udelay(1);
308 	if (panic_timeout == 0)
309 		panic_timeout = mca_cfg.panic_timeout;
310 	panic("Panicking machine check CPU died");
311 }
312 
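/*
 * Print all MCE records still pending in the genpool (corrected ones first,
 * the final/worst record last), try to save them via APEI, then panic -
 * unless fake_panic is set, in which case only the messages are printed.
 */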
313 static void mce_panic(const char *msg, struct mce *final, char *exp)
314 {
315 	int apei_err = 0;
316 	struct llist_node *pending;
317 	struct mce_evt_llist *l;
318 
319 	if (!fake_panic) {
320 		/*
321 		 * Make sure only one CPU runs in machine check panic
322 		 */
323 		if (atomic_inc_return(&mce_panicked) > 1)
324 			wait_for_panic();
325 		barrier();
326 
327 		bust_spinlocks(1);
328 		console_verbose();
329 	} else {
330 		/* Don't log too much for fake panic */
331 		if (atomic_inc_return(&mce_fake_panicked) > 1)
332 			return;
333 	}
334 	pending = mce_gen_pool_prepare_records();
335 	/* First print corrected ones that are still unlogged */
336 	llist_for_each_entry(l, pending, llnode) {
337 		struct mce *m = &l->mce;
338 		if (!(m->status & MCI_STATUS_UC)) {
339 			print_mce(m);
340 			if (!apei_err)
341 				apei_err = apei_write_mce(m);
342 		}
343 	}
344 	/* Now print uncorrected but with the final one last */
345 	llist_for_each_entry(l, pending, llnode) {
346 		struct mce *m = &l->mce;
347 		if (!(m->status & MCI_STATUS_UC))
348 			continue;
349 		if (!final || mce_cmp(m, final)) {
350 			print_mce(m);
351 			if (!apei_err)
352 				apei_err = apei_write_mce(m);
353 		}
354 	}
355 	if (final) {
356 		print_mce(final);
357 		if (!apei_err)
358 			apei_err = apei_write_mce(final);
359 	}
360 	if (cpu_missing)
361 		pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
362 	if (exp)
363 		pr_emerg(HW_ERR "Machine check: %s\n", exp);
364 	if (!fake_panic) {
365 		if (panic_timeout == 0)
366 			panic_timeout = mca_cfg.panic_timeout;
367 		panic(msg);
368 	} else
369 		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
370 }
371 
372 /* Support code for software error injection */
373 
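/*
 * Map an MCA MSR to the offset of the matching field in struct mce so that
 * values from the per-CPU "injectm" record can be used instead of the real
 * hardware registers. Returns -1 for MSRs that are not emulated.
 */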
374 static int msr_to_offset(u32 msr)
375 {
376 	unsigned bank = __this_cpu_read(injectm.bank);
377 
378 	if (msr == mca_cfg.rip_msr)
379 		return offsetof(struct mce, ip);
380 	if (msr == msr_ops.status(bank))
381 		return offsetof(struct mce, status);
382 	if (msr == msr_ops.addr(bank))
383 		return offsetof(struct mce, addr);
384 	if (msr == msr_ops.misc(bank))
385 		return offsetof(struct mce, misc);
386 	if (msr == MSR_IA32_MCG_STATUS)
387 		return offsetof(struct mce, mcgstatus);
388 	return -1;
389 }
390 
391 /* MSR access wrappers used for error injection */
392 static u64 mce_rdmsrl(u32 msr)
393 {
394 	u64 v;
395 
396 	if (__this_cpu_read(injectm.finished)) {
397 		int offset = msr_to_offset(msr);
398 
399 		if (offset < 0)
400 			return 0;
401 		return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
402 	}
403 
404 	if (rdmsrl_safe(msr, &v)) {
405 		WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
406 		/*
407 		 * Return zero in case the access faulted. This should
408 		 * not happen normally but can happen if the CPU does
409 		 * something weird, or if the code is buggy.
410 		 */
411 		v = 0;
412 	}
413 
414 	return v;
415 }
416 
417 static void mce_wrmsrl(u32 msr, u64 v)
418 {
419 	if (__this_cpu_read(injectm.finished)) {
420 		int offset = msr_to_offset(msr);
421 
422 		if (offset >= 0)
423 			*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
424 		return;
425 	}
426 	wrmsrl(msr, v);
427 }
428 
429 /*
430  * Collect all global (w.r.t. this processor) status about this machine
431  * check into our "mce" struct so that we can use it later to assess
432  * the severity of the problem as we read per-bank specific details.
433  */
434 static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
435 {
436 	mce_setup(m);
437 
438 	m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
439 	if (regs) {
440 		/*
441 		 * Get the address of the instruction at the time of
442 		 * the machine check error.
443 		 */
444 		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
445 			m->ip = regs->ip;
446 			m->cs = regs->cs;
447 
448 			/*
449 			 * When in VM86 mode make the cs look like ring 3
450 			 * always. This is a lie, but it's better than passing
451 			 * the additional vm86 bit around everywhere.
452 			 */
453 			if (v8086_mode(regs))
454 				m->cs |= 3;
455 		}
456 		/* Use accurate RIP reporting if available. */
457 		if (mca_cfg.rip_msr)
458 			m->ip = mce_rdmsrl(mca_cfg.rip_msr);
459 	}
460 }
461 
462 int mce_available(struct cpuinfo_x86 *c)
463 {
464 	if (mca_cfg.disabled)
465 		return 0;
466 	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
467 }
468 
469 static void mce_schedule_work(void)
470 {
471 	if (!mce_gen_pool_empty())
472 		schedule_work(&mce_work);
473 }
474 
475 static void mce_irq_work_cb(struct irq_work *entry)
476 {
477 	mce_schedule_work();
478 }
479 
480 /*
481  * Check if the address reported by the CPU is in a format we can parse.
482  * It would be possible to add code for most other cases, but all would
483  * be somewhat complicated (e.g. segment offset would require an instruction
484  * parser). So only support physical addresses up to page granularity for now.
485  */
486 int mce_usable_address(struct mce *m)
487 {
488 	if (!(m->status & MCI_STATUS_ADDRV))
489 		return 0;
490 
491 	/* Checks after this one are Intel/Zhaoxin-specific: */
492 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL &&
493 	    boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN)
494 		return 1;
495 
496 	if (!(m->status & MCI_STATUS_MISCV))
497 		return 0;
498 
499 	if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
500 		return 0;
501 
502 	if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
503 		return 0;
504 
505 	return 1;
506 }
507 EXPORT_SYMBOL_GPL(mce_usable_address);
508 
509 bool mce_is_memory_error(struct mce *m)
510 {
511 	switch (m->cpuvendor) {
512 	case X86_VENDOR_AMD:
513 	case X86_VENDOR_HYGON:
514 		return amd_mce_is_memory_error(m);
515 
516 	case X86_VENDOR_INTEL:
517 	case X86_VENDOR_ZHAOXIN:
518 		/*
519 		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
520 		 *
521 		 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
522 		 * indicating a memory error. Bit 8 is used for indicating a
523 		 * cache hierarchy error. The combination of bit 2 and bit 3
524  * is used for indicating a `generic' cache hierarchy error.
525  * But we can't just blindly check the above bits, because if
526  * bit 11 is set, then it is a bus/interconnect error - and
527  * either way the above bits just give more detail on what
528 		 * bus/interconnect error happened. Note that bit 12 can be
529 		 * ignored, as it's the "filter" bit.
530 		 */
531 		return (m->status & 0xef80) == BIT(7) ||
532 		       (m->status & 0xef00) == BIT(8) ||
533 		       (m->status & 0xeffc) == 0xc;
534 
535 	default:
536 		return false;
537 	}
538 }
539 EXPORT_SYMBOL_GPL(mce_is_memory_error);
540 
541 bool mce_is_correctable(struct mce *m)
542 {
543 	if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
544 		return false;
545 
546 	if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED)
547 		return false;
548 
549 	if (m->status & MCI_STATUS_UC)
550 		return false;
551 
552 	return true;
553 }
554 EXPORT_SYMBOL_GPL(mce_is_correctable);
555 
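/*
 * Offer a correctable DRAM error with a usable address to the Correctable
 * Errors Collector. Returns true if the CEC consumed it, in which case the
 * notifier chain is stopped and no further logging is done.
 */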
556 static bool cec_add_mce(struct mce *m)
557 {
558 	if (!m)
559 		return false;
560 
561 	/* We eat only correctable DRAM errors with usable addresses. */
562 	if (mce_is_memory_error(m) &&
563 	    mce_is_correctable(m)  &&
564 	    mce_usable_address(m))
565 		if (!cec_add_elem(m->addr >> PAGE_SHIFT))
566 			return true;
567 
568 	return false;
569 }
570 
571 static int mce_first_notifier(struct notifier_block *nb, unsigned long val,
572 			      void *data)
573 {
574 	struct mce *m = (struct mce *)data;
575 
576 	if (!m)
577 		return NOTIFY_DONE;
578 
579 	if (cec_add_mce(m))
580 		return NOTIFY_STOP;
581 
582 	/* Emit the trace record: */
583 	trace_mce_record(m);
584 
585 	set_bit(0, &mce_need_notify);
586 
587 	mce_notify_irq();
588 
589 	return NOTIFY_DONE;
590 }
591 
592 static struct notifier_block first_nb = {
593 	.notifier_call	= mce_first_notifier,
594 	.priority	= MCE_PRIO_FIRST,
595 };
596 
597 static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
598 				void *data)
599 {
600 	struct mce *mce = (struct mce *)data;
601 	unsigned long pfn;
602 
603 	if (!mce)
604 		return NOTIFY_DONE;
605 
606 	if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
607 		pfn = mce->addr >> PAGE_SHIFT;
608 		if (!memory_failure(pfn, 0))
609 			set_mce_nospec(pfn);
610 	}
611 
612 	return NOTIFY_OK;
613 }
614 static struct notifier_block mce_srao_nb = {
615 	.notifier_call	= srao_decode_notifier,
616 	.priority	= MCE_PRIO_SRAO,
617 };
618 
619 static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
620 				void *data)
621 {
622 	struct mce *m = (struct mce *)data;
623 
624 	if (!m)
625 		return NOTIFY_DONE;
626 
627 	if (atomic_read(&num_notifiers) > NUM_DEFAULT_NOTIFIERS)
628 		return NOTIFY_DONE;
629 
630 	__print_mce(m);
631 
632 	return NOTIFY_DONE;
633 }
634 
635 static struct notifier_block mce_default_nb = {
636 	.notifier_call	= mce_default_notifier,
637 	/* lowest prio, we want it to run last. */
638 	.priority	= MCE_PRIO_LOWEST,
639 };
640 
641 /*
642  * Read ADDR and MISC registers.
643  */
644 static void mce_read_aux(struct mce *m, int i)
645 {
646 	if (m->status & MCI_STATUS_MISCV)
647 		m->misc = mce_rdmsrl(msr_ops.misc(i));
648 
649 	if (m->status & MCI_STATUS_ADDRV) {
650 		m->addr = mce_rdmsrl(msr_ops.addr(i));
651 
652 		/*
653 		 * Mask the reported address by the reported granularity.
654 		 */
655 		if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
656 			u8 shift = MCI_MISC_ADDR_LSB(m->misc);
657 			m->addr >>= shift;
658 			m->addr <<= shift;
659 		}
660 
661 		/*
662 		 * Extract [55:<lsb>] where lsb is the least significant
663 		 * *valid* bit of the address bits.
664 		 */
665 		if (mce_flags.smca) {
666 			u8 lsb = (m->addr >> 56) & 0x3f;
667 
668 			m->addr &= GENMASK_ULL(55, lsb);
669 		}
670 	}
671 
672 	if (mce_flags.smca) {
673 		m->ipid = mce_rdmsrl(MSR_AMD64_SMCA_MCx_IPID(i));
674 
675 		if (m->status & MCI_STATUS_SYNDV)
676 			m->synd = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND(i));
677 	}
678 }
679 
680 DEFINE_PER_CPU(unsigned, mce_poll_count);
681 
682 /*
683  * Poll for corrected events or events that happened before reset.
684  * Those are just logged through /dev/mcelog.
685  *
686  * This is executed in standard interrupt context.
687  *
688  * Note: the spec recommends panicking for fatal unsignaled
689  * errors here. However this would be quite problematic --
690  * we would need to reimplement the Monarch handling and
691  * it would mess up the exclusion between exception handler
692  * and poll handler -- so we skip this for now.
693  * These cases should not happen anyway, or only when the CPU
694  * is already totally confused. In this case it's likely it will
695  * not fully execute the machine check handler either.
696  */
697 bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
698 {
699 	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
700 	bool error_seen = false;
701 	struct mce m;
702 	int i;
703 
704 	this_cpu_inc(mce_poll_count);
705 
706 	mce_gather_info(&m, NULL);
707 
708 	if (flags & MCP_TIMESTAMP)
709 		m.tsc = rdtsc();
710 
711 	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
712 		if (!mce_banks[i].ctl || !test_bit(i, *b))
713 			continue;
714 
715 		m.misc = 0;
716 		m.addr = 0;
717 		m.bank = i;
718 
719 		barrier();
720 		m.status = mce_rdmsrl(msr_ops.status(i));
721 
722 		/* If this entry is not valid, ignore it */
723 		if (!(m.status & MCI_STATUS_VAL))
724 			continue;
725 
726 		/*
727 		 * If we are logging everything (at CPU online) or this
728 		 * is a corrected error, then we must log it.
729 		 */
730 		if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC))
731 			goto log_it;
732 
733 		/*
734 		 * Newer Intel systems that support software error
735 		 * recovery need to make additional checks. Other
736 		 * CPUs should skip over uncorrected errors, but log
737 		 * everything else.
738 		 */
739 		if (!mca_cfg.ser) {
740 			if (m.status & MCI_STATUS_UC)
741 				continue;
742 			goto log_it;
743 		}
744 
745 		/* Log "not enabled" (speculative) errors */
746 		if (!(m.status & MCI_STATUS_EN))
747 			goto log_it;
748 
749 		/*
750 		 * Log UCNA (SDM: 15.6.3 "UCR Error Classification")
751 		 * UC == 1 && PCC == 0 && S == 0
752 		 */
753 		if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S))
754 			goto log_it;
755 
756 		/*
757 		 * Skip anything else. Presumption is that our read of this
758 		 * bank is racing with a machine check. Leave the log alone
759 		 * for do_machine_check() to deal with it.
760 		 */
761 		continue;
762 
763 log_it:
764 		error_seen = true;
765 
766 		mce_read_aux(&m, i);
767 
768 		m.severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
769 
770 		/*
771 		 * Don't get the IP here because it's unlikely to
772 		 * have anything to do with the actual error location.
773 		 */
774 		if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
775 			mce_log(&m);
776 		else if (mce_usable_address(&m)) {
777 			/*
778 			 * Although we skipped logging this, we still want
779 			 * to take action. Add to the pool so the registered
780 			 * notifiers will see it.
781 			 */
782 			if (!mce_gen_pool_add(&m))
783 				mce_schedule_work();
784 		}
785 
786 		/*
787 		 * Clear state for this bank.
788 		 */
789 		mce_wrmsrl(msr_ops.status(i), 0);
790 	}
791 
792 	/*
793 	 * Don't clear MCG_STATUS here because it's only defined for
794 	 * exceptions.
795 	 */
796 
797 	sync_core();
798 
799 	return error_seen;
800 }
801 EXPORT_SYMBOL_GPL(machine_check_poll);
802 
803 /*
804  * Do a quick check if any of the events requires a panic.
805  * This decides if we keep the events around or clear them.
806  */
807 static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
808 			  struct pt_regs *regs)
809 {
810 	char *tmp;
811 	int i;
812 
813 	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
814 		m->status = mce_rdmsrl(msr_ops.status(i));
815 		if (!(m->status & MCI_STATUS_VAL))
816 			continue;
817 
818 		__set_bit(i, validp);
819 		if (quirk_no_way_out)
820 			quirk_no_way_out(i, m, regs);
821 
822 		if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
823 			m->bank = i;
824 			mce_read_aux(m, i);
825 			*msg = tmp;
826 			return 1;
827 		}
828 	}
829 	return 0;
830 }
831 
832 /*
833  * Variable to establish order between CPUs while scanning.
834  * Each CPU spins initially until executing equals its number.
835  */
836 static atomic_t mce_executing;
837 
838 /*
839  * Defines order of CPUs on entry. First CPU becomes Monarch.
840  */
841 static atomic_t mce_callin;
842 
843 /*
844  * Check if a timeout waiting for other CPUs happened.
845  */
846 static int mce_timed_out(u64 *t, const char *msg)
847 {
848 	/*
849 	 * The others already did panic for some reason.
850 	 * Bail out like in a timeout.
851 	 * rmb() to tell the compiler that system_state
852 	 * might have been modified by someone else.
853 	 */
854 	rmb();
855 	if (atomic_read(&mce_panicked))
856 		wait_for_panic();
857 	if (!mca_cfg.monarch_timeout)
858 		goto out;
859 	if ((s64)*t < SPINUNIT) {
860 		if (mca_cfg.tolerant <= 1)
861 			mce_panic(msg, NULL, NULL);
862 		cpu_missing = 1;
863 		return 1;
864 	}
865 	*t -= SPINUNIT;
866 out:
867 	touch_nmi_watchdog();
868 	return 0;
869 }
870 
871 /*
872  * The Monarch's reign.  The Monarch is the CPU who entered
873  * the machine check handler first. It waits for the others to
874  * raise the exception too and then grades them. If any error
875  * is fatal, it panics. Only then does it let the others continue.
876  *
877  * The other CPUs entering the MCE handler will be controlled by the
878  * Monarch. They are called Subjects.
879  *
880  * This way we prevent any potential data corruption in an unrecoverable case
881  * and also make sure that all CPUs' errors are always examined.
882  *
883  * Also this detects the case of a machine check event coming from outer
884  * space (not detected by any CPU). In this case some external agent wants
885  * us to shut down, so panic too.
886  *
887  * The other CPUs might still decide to panic if the handler happens
888  * in an unrecoverable place, but in this case the system is in a semi-stable
889  * state and won't corrupt anything by itself. It's ok to let the others
890  * continue for a bit first.
891  *
892  * All the spin loops have timeouts; when a timeout happens a CPU
893  * typically elects itself to be Monarch.
894  */
895 static void mce_reign(void)
896 {
897 	int cpu;
898 	struct mce *m = NULL;
899 	int global_worst = 0;
900 	char *msg = NULL;
901 	char *nmsg = NULL;
902 
903 	/*
904 	 * This CPU is the Monarch and the other CPUs have run
905 	 * through their handlers.
906 	 * Grade the severity of the errors of all the CPUs.
907 	 */
908 	for_each_possible_cpu(cpu) {
909 		int severity = mce_severity(&per_cpu(mces_seen, cpu),
910 					    mca_cfg.tolerant,
911 					    &nmsg, true);
912 		if (severity > global_worst) {
913 			msg = nmsg;
914 			global_worst = severity;
915 			m = &per_cpu(mces_seen, cpu);
916 		}
917 	}
918 
919 	/*
920 	 * Cannot recover? Panic here then.
921 	 * This dumps all the mces in the log buffer and stops the
922 	 * other CPUs.
923 	 */
924 	if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
925 		mce_panic("Fatal machine check", m, msg);
926 
927 	/*
928 	 * For UC somewhere we let the CPU who detects it handle it.
929 	 * Also must let continue the others, otherwise the handling
930 	 * CPU could deadlock on a lock.
931 	 */
932 
933 	/*
934 	 * No machine check event found. Must be some external
935 	 * source or one CPU is hung. Panic.
936 	 */
937 	if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
938 		mce_panic("Fatal machine check from unknown source", NULL, NULL);
939 
940 	/*
941 	 * Now clear all the mces_seen so that they don't reappear on
942 	 * the next mce.
943 	 */
944 	for_each_possible_cpu(cpu)
945 		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
946 }
947 
948 static atomic_t global_nwo;
949 
950 /*
951  * Start of Monarch synchronization. This waits until all CPUs have
952  * entered the exception handler and then determines if any of them
953  * saw a fatal event that requires panic. Then it executes them
954  * in the entry order.
955  * TBD double check parallel CPU hotunplug
956  */
957 static int mce_start(int *no_way_out)
958 {
959 	int order;
960 	int cpus = num_online_cpus();
961 	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
962 
963 	if (!timeout)
964 		return -1;
965 
966 	atomic_add(*no_way_out, &global_nwo);
967 	/*
968 	 * Rely on the implied barrier below, such that global_nwo
969 	 * is updated before mce_callin.
970 	 */
971 	order = atomic_inc_return(&mce_callin);
972 
973 	/*
974 	 * Wait for everyone.
975 	 */
976 	while (atomic_read(&mce_callin) != cpus) {
977 		if (mce_timed_out(&timeout,
978 				  "Timeout: Not all CPUs entered broadcast exception handler")) {
979 			atomic_set(&global_nwo, 0);
980 			return -1;
981 		}
982 		ndelay(SPINUNIT);
983 	}
984 
985 	/*
986 	 * mce_callin should be read before global_nwo
987 	 */
988 	smp_rmb();
989 
990 	if (order == 1) {
991 		/*
992 		 * Monarch: Starts executing now, the others wait.
993 		 */
994 		atomic_set(&mce_executing, 1);
995 	} else {
996 		/*
997 		 * Subject: Now start the scanning loop one by one in
998 		 * the original callin order.
999 		 * This way, any error in a shared bank will only be seen
1000 		 * by one CPU before being cleared, avoiding duplicates.
1001 		 */
1002 		while (atomic_read(&mce_executing) < order) {
1003 			if (mce_timed_out(&timeout,
1004 					  "Timeout: Subject CPUs unable to finish machine check processing")) {
1005 				atomic_set(&global_nwo, 0);
1006 				return -1;
1007 			}
1008 			ndelay(SPINUNIT);
1009 		}
1010 	}
1011 
1012 	/*
1013 	 * Cache the global no_way_out state.
1014 	 */
1015 	*no_way_out = atomic_read(&global_nwo);
1016 
1017 	return order;
1018 }
1019 
1020 /*
1021  * Synchronize between CPUs after main scanning loop.
1022  * This invokes the bulk of the Monarch processing.
1023  */
1024 static int mce_end(int order)
1025 {
1026 	int ret = -1;
1027 	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
1028 
1029 	if (!timeout)
1030 		goto reset;
1031 	if (order < 0)
1032 		goto reset;
1033 
1034 	/*
1035 	 * Allow others to run.
1036 	 */
1037 	atomic_inc(&mce_executing);
1038 
1039 	if (order == 1) {
1040 		/* CHECKME: Can this race with a parallel hotplug? */
1041 		int cpus = num_online_cpus();
1042 
1043 		/*
1044 		 * Monarch: Wait for everyone to go through their scanning
1045 		 * loops.
1046 		 */
1047 		while (atomic_read(&mce_executing) <= cpus) {
1048 			if (mce_timed_out(&timeout,
1049 					  "Timeout: Monarch CPU unable to finish machine check processing"))
1050 				goto reset;
1051 			ndelay(SPINUNIT);
1052 		}
1053 
1054 		mce_reign();
1055 		barrier();
1056 		ret = 0;
1057 	} else {
1058 		/*
1059 		 * Subject: Wait for Monarch to finish.
1060 		 */
1061 		while (atomic_read(&mce_executing) != 0) {
1062 			if (mce_timed_out(&timeout,
1063 					  "Timeout: Monarch CPU did not finish machine check processing"))
1064 				goto reset;
1065 			ndelay(SPINUNIT);
1066 		}
1067 
1068 		/*
1069 		 * Don't reset anything. That's done by the Monarch.
1070 		 */
1071 		return 0;
1072 	}
1073 
1074 	/*
1075 	 * Reset all global state.
1076 	 */
1077 reset:
1078 	atomic_set(&global_nwo, 0);
1079 	atomic_set(&mce_callin, 0);
1080 	barrier();
1081 
1082 	/*
1083 	 * Let others run again.
1084 	 */
1085 	atomic_set(&mce_executing, 0);
1086 	return ret;
1087 }
1088 
1089 static void mce_clear_state(unsigned long *toclear)
1090 {
1091 	int i;
1092 
1093 	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1094 		if (test_bit(i, toclear))
1095 			mce_wrmsrl(msr_ops.status(i), 0);
1096 	}
1097 }
1098 
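/*
 * Try to offline the page containing the error. If we cannot return to the
 * interrupted context (no RIPV), the process must be killed as well. On
 * success, mark the page via set_mce_nospec() so the kernel stays away
 * from it.
 */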
1099 static int do_memory_failure(struct mce *m)
1100 {
1101 	int flags = MF_ACTION_REQUIRED;
1102 	int ret;
1103 
1104 	pr_err("Uncorrected hardware memory error in user-access at %llx\n", m->addr);
1105 	if (!(m->mcgstatus & MCG_STATUS_RIPV))
1106 		flags |= MF_MUST_KILL;
1107 	ret = memory_failure(m->addr >> PAGE_SHIFT, flags);
1108 	if (ret)
1109 		pr_err("Memory error not recovered\n");
1110 	else
1111 		set_mce_nospec(m->addr >> PAGE_SHIFT);
1112 	return ret;
1113 }
1114 
1115 
1116 /*
1117  * Cases where we avoid rendezvous handler timeout:
1118  * 1) If this CPU is offline.
1119  *
1120  * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
1121  *  skip those CPUs which remain looping in the 1st kernel - see
1122  *  crash_nmi_callback().
1123  *
1124  * Note: there still is a small window between kexec-ing and the new,
1125  * kdump kernel establishing a new #MC handler where a broadcasted MCE
1126  * might not get handled properly.
1127  */
1128 static bool __mc_check_crashing_cpu(int cpu)
1129 {
1130 	if (cpu_is_offline(cpu) ||
1131 	    (crashing_cpu != -1 && crashing_cpu != cpu)) {
1132 		u64 mcgstatus;
1133 
1134 		mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
1135 
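		/*
		 * A local (LMCES) machine check on Zhaoxin CPUs is delivered
		 * only to this CPU and therefore cannot be skipped here; let
		 * it go through the normal handler.
		 */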
1136 		if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) {
1137 			if (mcgstatus & MCG_STATUS_LMCES)
1138 				return false;
1139 		}
1140 
1141 		if (mcgstatus & MCG_STATUS_RIPV) {
1142 			mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1143 			return true;
1144 		}
1145 	}
1146 	return false;
1147 }
1148 
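/*
 * Scan the MCA banks flagged in @valid_banks: grade each valid error with
 * mce_severity(), log the ones that need logging, track the worst severity
 * in *worst (and the matching record in *final), and mark the banks that
 * need clearing in @toclear.
 */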
1149 static void __mc_scan_banks(struct mce *m, struct mce *final,
1150 			    unsigned long *toclear, unsigned long *valid_banks,
1151 			    int no_way_out, int *worst)
1152 {
1153 	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1154 	struct mca_config *cfg = &mca_cfg;
1155 	int severity, i;
1156 
1157 	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1158 		__clear_bit(i, toclear);
1159 		if (!test_bit(i, valid_banks))
1160 			continue;
1161 
1162 		if (!mce_banks[i].ctl)
1163 			continue;
1164 
1165 		m->misc = 0;
1166 		m->addr = 0;
1167 		m->bank = i;
1168 
1169 		m->status = mce_rdmsrl(msr_ops.status(i));
1170 		if (!(m->status & MCI_STATUS_VAL))
1171 			continue;
1172 
1173 		/*
1174 		 * Corrected or non-signaled errors are handled by
1175 		 * machine_check_poll(). Leave them alone, unless this panics.
1176 		 */
1177 		if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
1178 			!no_way_out)
1179 			continue;
1180 
1181 		/* Set taint even when machine check was not enabled. */
1182 		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
1183 
1184 		severity = mce_severity(m, cfg->tolerant, NULL, true);
1185 
1186 		/*
1187 		 * When the machine check was for a corrected/deferred error, leave
1188 		 * it to the corresponding handler; don't touch it here unless we're panicking.
1189 		 */
1190 		if ((severity == MCE_KEEP_SEVERITY ||
1191 		     severity == MCE_UCNA_SEVERITY) && !no_way_out)
1192 			continue;
1193 
1194 		__set_bit(i, toclear);
1195 
1196 		/* Machine check event was not enabled. Clear, but ignore. */
1197 		if (severity == MCE_NO_SEVERITY)
1198 			continue;
1199 
1200 		mce_read_aux(m, i);
1201 
1202 		/* assuming valid severity level != 0 */
1203 		m->severity = severity;
1204 
1205 		mce_log(m);
1206 
1207 		if (severity > *worst) {
1208 			*final = *m;
1209 			*worst = severity;
1210 		}
1211 	}
1212 
1213 	/* mce_clear_state will clear *final, save locally for use later */
1214 	*m = *final;
1215 }
1216 
1217 /*
1218  * The actual machine check handler. This only handles real
1219  * exceptions when something got corrupted coming in through int 18.
1220  *
1221  * This is executed in NMI context not subject to normal locking rules. This
1222  * implies that most kernel services cannot be safely used. Don't even
1223  * think about putting a printk in there!
1224  *
1225  * On Intel systems this is entered on all CPUs in parallel through
1226  * MCE broadcast. However some CPUs might be broken beyond repair,
1227  * so always be careful when synchronizing with others.
1228  */
1229 void do_machine_check(struct pt_regs *regs, long error_code)
1230 {
1231 	DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
1232 	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
1233 	struct mca_config *cfg = &mca_cfg;
1234 	int cpu = smp_processor_id();
1235 	char *msg = "Unknown";
1236 	struct mce m, *final;
1237 	int worst = 0;
1238 
1239 	/*
1240 	 * Establish sequential order between the CPUs entering the machine
1241 	 * check handler.
1242 	 */
1243 	int order = -1;
1244 
1245 	/*
1246 	 * If no_way_out gets set, there is no safe way to recover from this
1247 	 * MCE.  If mca_cfg.tolerant is cranked up, we'll try anyway.
1248 	 */
1249 	int no_way_out = 0;
1250 
1251 	/*
1252 	 * If kill_it gets set, there might be a way to recover from this
1253 	 * error.
1254 	 */
1255 	int kill_it = 0;
1256 
1257 	/*
1258 	 * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
1259 	 * on Intel.
1260 	 */
1261 	int lmce = 1;
1262 
1263 	if (__mc_check_crashing_cpu(cpu))
1264 		return;
1265 
1266 	ist_enter(regs);
1267 
1268 	this_cpu_inc(mce_exception_count);
1269 
1270 	mce_gather_info(&m, regs);
1271 	m.tsc = rdtsc();
1272 
1273 	final = this_cpu_ptr(&mces_seen);
1274 	*final = m;
1275 
1276 	memset(valid_banks, 0, sizeof(valid_banks));
1277 	no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
1278 
1279 	barrier();
1280 
1281 	/*
1282 	 * When there is no restart IP we might need to kill or panic.
1283 	 * Assume the worst for now, but if we find the
1284 	 * severity is MCE_AR_SEVERITY we have other options.
1285 	 */
1286 	if (!(m.mcgstatus & MCG_STATUS_RIPV))
1287 		kill_it = 1;
1288 
1289 	/*
1290 	 * Check if this MCE is signaled to only this logical processor,
1291 	 * on Intel, Zhaoxin only.
1292 	 */
1293 	if (m.cpuvendor == X86_VENDOR_INTEL ||
1294 	    m.cpuvendor == X86_VENDOR_ZHAOXIN)
1295 		lmce = m.mcgstatus & MCG_STATUS_LMCES;
1296 
1297 	/*
1298 	 * Local machine check may already know that we have to panic.
1299 	 * Broadcast machine check begins rendezvous in mce_start()
1300 	 * Go through all banks in exclusion of the other CPUs. This way we
1301 	 * don't report duplicated events on shared banks because the first one
1302 	 * to see it will clear it.
1303 	 */
1304 	if (lmce) {
1305 		if (no_way_out)
1306 			mce_panic("Fatal local machine check", &m, msg);
1307 	} else {
1308 		order = mce_start(&no_way_out);
1309 	}
1310 
1311 	__mc_scan_banks(&m, final, toclear, valid_banks, no_way_out, &worst);
1312 
1313 	if (!no_way_out)
1314 		mce_clear_state(toclear);
1315 
1316 	/*
1317 	 * Do most of the synchronization with other CPUs.
1318 	 * When there's any problem use only local no_way_out state.
1319 	 */
1320 	if (!lmce) {
1321 		if (mce_end(order) < 0)
1322 			no_way_out = worst >= MCE_PANIC_SEVERITY;
1323 	} else {
1324 		/*
1325 		 * If there was a fatal machine check we should have
1326 		 * already called mce_panic earlier in this function.
1327 		 * Since we re-read the banks, we might have found
1328 		 * something new. Check again to see if we found a
1329 		 * fatal error. We call "mce_severity()" again to
1330 		 * make sure we have the right "msg".
1331 		 */
1332 		if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
1333 			mce_severity(&m, cfg->tolerant, &msg, true);
1334 			mce_panic("Local fatal machine check!", &m, msg);
1335 		}
1336 	}
1337 
1338 	/*
1339 	 * If tolerant is at an insane level we drop requests to kill
1340 	 * processes and continue even when there is no way out.
1341 	 */
1342 	if (cfg->tolerant == 3)
1343 		kill_it = 0;
1344 	else if (no_way_out)
1345 		mce_panic("Fatal machine check on current CPU", &m, msg);
1346 
1347 	if (worst > 0)
1348 		irq_work_queue(&mce_irq_work);
1349 
1350 	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1351 
1352 	sync_core();
1353 
1354 	if (worst != MCE_AR_SEVERITY && !kill_it)
1355 		goto out_ist;
1356 
1357 	/* Fault was in user mode and we need to take some action */
1358 	if ((m.cs & 3) == 3) {
1359 		ist_begin_non_atomic(regs);
1360 		local_irq_enable();
1361 
1362 		if (kill_it || do_memory_failure(&m))
1363 			force_sig(SIGBUS);
1364 		local_irq_disable();
1365 		ist_end_non_atomic();
1366 	} else {
1367 		if (!fixup_exception(regs, X86_TRAP_MC, error_code, 0))
1368 			mce_panic("Failed kernel mode recovery", &m, NULL);
1369 	}
1370 
1371 out_ist:
1372 	ist_exit(regs);
1373 }
1374 EXPORT_SYMBOL_GPL(do_machine_check);
1375 
1376 #ifndef CONFIG_MEMORY_FAILURE
1377 int memory_failure(unsigned long pfn, int flags)
1378 {
1379 	/* mce_severity() should not hand us an ACTION_REQUIRED error */
1380 	BUG_ON(flags & MF_ACTION_REQUIRED);
1381 	pr_err("Uncorrected memory error in page 0x%lx ignored\n"
1382 	       "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
1383 	       pfn);
1384 
1385 	return 0;
1386 }
1387 #endif
1388 
1389 /*
1390  * Periodic polling timer for "silent" machine check errors.  If the
1391  * poller finds an MCE, poll 2x faster.  When the poller finds no more
1392  * errors, poll 2x slower (up to check_interval seconds).
1393  */
1394 static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
1395 
1396 static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
1397 static DEFINE_PER_CPU(struct timer_list, mce_timer);
1398 
1399 static unsigned long mce_adjust_timer_default(unsigned long interval)
1400 {
1401 	return interval;
1402 }
1403 
1404 static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
1405 
1406 static void __start_timer(struct timer_list *t, unsigned long interval)
1407 {
1408 	unsigned long when = jiffies + interval;
1409 	unsigned long flags;
1410 
1411 	local_irq_save(flags);
1412 
1413 	if (!timer_pending(t) || time_before(when, t->expires))
1414 		mod_timer(t, round_jiffies(when));
1415 
1416 	local_irq_restore(flags);
1417 }
1418 
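/*
 * Periodic poll timer callback: check the non-CMCI banks for corrected
 * errors and adapt the polling interval - faster while errors are being
 * found, slower (up to check_interval) otherwise.
 */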
1419 static void mce_timer_fn(struct timer_list *t)
1420 {
1421 	struct timer_list *cpu_t = this_cpu_ptr(&mce_timer);
1422 	unsigned long iv;
1423 
1424 	WARN_ON(cpu_t != t);
1425 
1426 	iv = __this_cpu_read(mce_next_interval);
1427 
1428 	if (mce_available(this_cpu_ptr(&cpu_info))) {
1429 		machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
1430 
1431 		if (mce_intel_cmci_poll()) {
1432 			iv = mce_adjust_timer(iv);
1433 			goto done;
1434 		}
1435 	}
1436 
1437 	/*
1438 	 * Alert userspace if needed. If we logged an MCE, reduce the polling
1439 	 * interval, otherwise increase the polling interval.
1440 	 */
1441 	if (mce_notify_irq())
1442 		iv = max(iv / 2, (unsigned long) HZ/100);
1443 	else
1444 		iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
1445 
1446 done:
1447 	__this_cpu_write(mce_next_interval, iv);
1448 	__start_timer(t, iv);
1449 }
1450 
1451 /*
1452  * Ensure that the timer is firing in @interval from now.
1453  */
1454 void mce_timer_kick(unsigned long interval)
1455 {
1456 	struct timer_list *t = this_cpu_ptr(&mce_timer);
1457 	unsigned long iv = __this_cpu_read(mce_next_interval);
1458 
1459 	__start_timer(t, interval);
1460 
1461 	if (interval < iv)
1462 		__this_cpu_write(mce_next_interval, interval);
1463 }
1464 
1465 /* Must not be called in IRQ context where del_timer_sync() can deadlock */
1466 static void mce_timer_delete_all(void)
1467 {
1468 	int cpu;
1469 
1470 	for_each_online_cpu(cpu)
1471 		del_timer_sync(&per_cpu(mce_timer, cpu));
1472 }
1473 
1474 /*
1475  * Notify the user(s) about new machine check events.
1476  * Can be called from interrupt context, but not from machine check/NMI
1477  * context.
1478  */
1479 int mce_notify_irq(void)
1480 {
1481 	/* Not more than two messages every minute */
1482 	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
1483 
1484 	if (test_and_clear_bit(0, &mce_need_notify)) {
1485 		mce_work_trigger();
1486 
1487 		if (__ratelimit(&ratelimit))
1488 			pr_info(HW_ERR "Machine check events logged\n");
1489 
1490 		return 1;
1491 	}
1492 	return 0;
1493 }
1494 EXPORT_SYMBOL_GPL(mce_notify_irq);
1495 
1496 static void __mcheck_cpu_mce_banks_init(void)
1497 {
1498 	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1499 	u8 n_banks = this_cpu_read(mce_num_banks);
1500 	int i;
1501 
1502 	for (i = 0; i < n_banks; i++) {
1503 		struct mce_bank *b = &mce_banks[i];
1504 
1505 		/*
1506 		 * Init them all, __mcheck_cpu_apply_quirks() is going to apply
1507 		 * the required vendor quirks before
1508 		 * __mcheck_cpu_init_clear_banks() does the final bank setup.
1509 		 */
1510 		b->ctl = -1ULL;
1511 		b->init = 1;
1512 	}
1513 }
1514 
1515 /*
1516  * Initialize Machine Checks for a CPU.
1517  */
1518 static void __mcheck_cpu_cap_init(void)
1519 {
1520 	u64 cap;
1521 	u8 b;
1522 
1523 	rdmsrl(MSR_IA32_MCG_CAP, cap);
1524 
1525 	b = cap & MCG_BANKCNT_MASK;
1526 
1527 	if (b > MAX_NR_BANKS) {
1528 		pr_warn("CPU%d: Using only %u machine check banks out of %u\n",
1529 			smp_processor_id(), MAX_NR_BANKS, b);
1530 		b = MAX_NR_BANKS;
1531 	}
1532 
1533 	this_cpu_write(mce_num_banks, b);
1534 
1535 	__mcheck_cpu_mce_banks_init();
1536 
1537 	/* Use accurate RIP reporting if available. */
1538 	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
1539 		mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
1540 
1541 	if (cap & MCG_SER_P)
1542 		mca_cfg.ser = 1;
1543 }
1544 
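/*
 * Log the errors left over from before the last reset, then turn on machine
 * checks via CR4.MCE and enable the global MCA features advertised in
 * MCG_CTL (if present).
 */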
1545 static void __mcheck_cpu_init_generic(void)
1546 {
1547 	enum mcp_flags m_fl = 0;
1548 	mce_banks_t all_banks;
1549 	u64 cap;
1550 
1551 	if (!mca_cfg.bootlog)
1552 		m_fl = MCP_DONTLOG;
1553 
1554 	/*
1555 	 * Log the machine checks left over from the previous reset.
1556 	 */
1557 	bitmap_fill(all_banks, MAX_NR_BANKS);
1558 	machine_check_poll(MCP_UC | m_fl, &all_banks);
1559 
1560 	cr4_set_bits(X86_CR4_MCE);
1561 
1562 	rdmsrl(MSR_IA32_MCG_CAP, cap);
1563 	if (cap & MCG_CTL_P)
1564 		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
1565 }
1566 
1567 static void __mcheck_cpu_init_clear_banks(void)
1568 {
1569 	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1570 	int i;
1571 
1572 	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1573 		struct mce_bank *b = &mce_banks[i];
1574 
1575 		if (!b->init)
1576 			continue;
1577 		wrmsrl(msr_ops.ctl(i), b->ctl);
1578 		wrmsrl(msr_ops.status(i), 0);
1579 	}
1580 }
1581 
1582 /*
1583  * Do a final check to see if there are any unused/RAZ banks.
1584  *
1585  * This must be done after the banks have been initialized and any quirks have
1586  * been applied.
1587  *
1588  * Do not call this from any user-initiated flows, e.g. CPU hotplug or sysfs.
1589  * Otherwise, a user who disables a bank will not be able to re-enable it
1590  * without a system reboot.
1591  */
1592 static void __mcheck_cpu_check_banks(void)
1593 {
1594 	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1595 	u64 msrval;
1596 	int i;
1597 
1598 	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1599 		struct mce_bank *b = &mce_banks[i];
1600 
1601 		if (!b->init)
1602 			continue;
1603 
1604 		rdmsrl(msr_ops.ctl(i), msrval);
1605 		b->init = !!msrval;
1606 	}
1607 }
1608 
1609 /*
1610  * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
1611  * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
1612  * Vol 3B Table 15-20). But this confuses both the code that determines
1613  * whether the machine check occurred in kernel or user mode, and also
1614  * the severity assessment code. Pretend that EIPV was set, and take the
1615  * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
1616  */
1617 static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
1618 {
1619 	if (bank != 0)
1620 		return;
1621 	if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
1622 		return;
1623 	if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
1624 		          MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
1625 			  MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
1626 			  MCACOD)) !=
1627 			 (MCI_STATUS_UC|MCI_STATUS_EN|
1628 			  MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
1629 			  MCI_STATUS_AR|MCACOD_INSTR))
1630 		return;
1631 
1632 	m->mcgstatus |= MCG_STATUS_EIPV;
1633 	m->ip = regs->ip;
1634 	m->cs = regs->cs;
1635 }
1636 
1637 /* Add per CPU specific workarounds here */
1638 static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1639 {
1640 	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1641 	struct mca_config *cfg = &mca_cfg;
1642 
1643 	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
1644 		pr_info("unknown CPU type - not enabling MCE support\n");
1645 		return -EOPNOTSUPP;
1646 	}
1647 
1648 	/* This should be disabled by the BIOS, but isn't always */
1649 	if (c->x86_vendor == X86_VENDOR_AMD) {
1650 		if (c->x86 == 15 && this_cpu_read(mce_num_banks) > 4) {
1651 			/*
1652 			 * disable GART TBL walk error reporting, which
1653 			 * trips off incorrectly with the IOMMU & 3ware
1654 			 * & Cerberus:
1655 			 */
1656 			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
1657 		}
1658 		if (c->x86 < 0x11 && cfg->bootlog < 0) {
1659 			/*
1660 			 * Lots of broken BIOS around that don't clear them
1661 			 * by default and leave crap in there. Don't log:
1662 			 */
1663 			cfg->bootlog = 0;
1664 		}
1665 		/*
1666 		 * Various K7s with broken bank 0 around. Always disable
1667 		 * by default.
1668 		 */
1669 		if (c->x86 == 6 && this_cpu_read(mce_num_banks) > 0)
1670 			mce_banks[0].ctl = 0;
1671 
1672 		/*
1673 		 * overflow_recov is supported for F15h Models 00h-0fh
1674 		 * even though we don't have a CPUID bit for it.
1675 		 */
1676 		if (c->x86 == 0x15 && c->x86_model <= 0xf)
1677 			mce_flags.overflow_recov = 1;
1678 
1679 	}
1680 
1681 	if (c->x86_vendor == X86_VENDOR_INTEL) {
1682 		/*
1683 		 * SDM documents that on family 6 bank 0 should not be written
1684 		 * because it aliases to another special BIOS controlled
1685 		 * register.
1686 		 * But it's not aliased anymore on model 0x1a+.
1687 		 * Don't ignore bank 0 completely because there could be a
1688 		 * valid event later, merely don't write CTL0.
1689 		 */
1690 
1691 		if (c->x86 == 6 && c->x86_model < 0x1A && this_cpu_read(mce_num_banks) > 0)
1692 			mce_banks[0].init = 0;
1693 
1694 		/*
1695 		 * All newer Intel systems support MCE broadcasting. Enable
1696 		 * synchronization with a one second timeout.
1697 		 */
1698 		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
1699 			cfg->monarch_timeout < 0)
1700 			cfg->monarch_timeout = USEC_PER_SEC;
1701 
1702 		/*
1703 		 * There are also broken BIOSes on some Pentium M and
1704 		 * earlier systems:
1705 		 */
1706 		if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
1707 			cfg->bootlog = 0;
1708 
1709 		if (c->x86 == 6 && c->x86_model == 45)
1710 			quirk_no_way_out = quirk_sandybridge_ifu;
1711 	}
1712 
1713 	if (c->x86_vendor == X86_VENDOR_ZHAOXIN) {
1714 		/*
1715 		 * All newer Zhaoxin CPUs support MCE broadcasting. Enable
1716 		 * synchronization with a one second timeout.
1717 		 */
1718 		if (c->x86 > 6 || (c->x86_model == 0x19 || c->x86_model == 0x1f)) {
1719 			if (cfg->monarch_timeout < 0)
1720 				cfg->monarch_timeout = USEC_PER_SEC;
1721 		}
1722 	}
1723 
1724 	if (cfg->monarch_timeout < 0)
1725 		cfg->monarch_timeout = 0;
1726 	if (cfg->bootlog != 0)
1727 		cfg->panic_timeout = 30;
1728 
1729 	return 0;
1730 }
1731 
1732 static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
1733 {
1734 	if (c->x86 != 5)
1735 		return 0;
1736 
1737 	switch (c->x86_vendor) {
1738 	case X86_VENDOR_INTEL:
1739 		intel_p5_mcheck_init(c);
1740 		return 1;
1741 		break;
1742 	case X86_VENDOR_CENTAUR:
1743 		winchip_mcheck_init(c);
1744 		return 1;
1745 		break;
1746 	default:
1747 		return 0;
1748 	}
1749 
1750 	return 0;
1751 }
1752 
1753 /*
1754  * Init basic CPU features needed for early decoding of MCEs.
1755  */
1756 static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
1757 {
1758 	if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) {
1759 		mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
1760 		mce_flags.succor	 = !!cpu_has(c, X86_FEATURE_SUCCOR);
1761 		mce_flags.smca		 = !!cpu_has(c, X86_FEATURE_SMCA);
1762 
1763 		if (mce_flags.smca) {
1764 			msr_ops.ctl	= smca_ctl_reg;
1765 			msr_ops.status	= smca_status_reg;
1766 			msr_ops.addr	= smca_addr_reg;
1767 			msr_ops.misc	= smca_misc_reg;
1768 		}
1769 	}
1770 }
1771 
1772 static void mce_centaur_feature_init(struct cpuinfo_x86 *c)
1773 {
1774 	struct mca_config *cfg = &mca_cfg;
1775 
1776 	 /*
1777 	  * All newer Centaur CPUs support MCE broadcasting. Enable
1778 	  * synchronization with a one second timeout.
1779 	  */
1780 	if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) ||
1781 	     c->x86 > 6) {
1782 		if (cfg->monarch_timeout < 0)
1783 			cfg->monarch_timeout = USEC_PER_SEC;
1784 	}
1785 }
1786 
1787 static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c)
1788 {
1789 	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1790 
1791 	/*
1792 	 * These CPUs have MCA bank 8 which reports only one error type called
1793 	 * SVAD (System View Address Decoder). The reporting of that error is
1794 	 * controlled by IA32_MC8.CTL.0.
1795 	 *
1796 	 * If enabled, prefetching on these CPUs will cause an SVAD MCE when
1797 	 * virtual machines start and result in a system panic. Always disable
1798 	 * bank 8 SVAD error by default.
1799 	 */
1800 	if ((c->x86 == 7 && c->x86_model == 0x1b) ||
1801 	    (c->x86_model == 0x19 || c->x86_model == 0x1f)) {
1802 		if (this_cpu_read(mce_num_banks) > 8)
1803 			mce_banks[8].ctl = 0;
1804 	}
1805 
1806 	intel_init_cmci();
1807 	intel_init_lmce();
1808 	mce_adjust_timer = cmci_intel_adjust_timer;
1809 }
1810 
1811 static void mce_zhaoxin_feature_clear(struct cpuinfo_x86 *c)
1812 {
1813 	intel_clear_lmce();
1814 }
1815 
1816 static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
1817 {
1818 	switch (c->x86_vendor) {
1819 	case X86_VENDOR_INTEL:
1820 		mce_intel_feature_init(c);
1821 		mce_adjust_timer = cmci_intel_adjust_timer;
1822 		break;
1823 
1824 	case X86_VENDOR_AMD: {
1825 		mce_amd_feature_init(c);
1826 		break;
1827 		}
1828 
1829 	case X86_VENDOR_HYGON:
1830 		mce_hygon_feature_init(c);
1831 		break;
1832 
1833 	case X86_VENDOR_CENTAUR:
1834 		mce_centaur_feature_init(c);
1835 		break;
1836 
1837 	case X86_VENDOR_ZHAOXIN:
1838 		mce_zhaoxin_feature_init(c);
1839 		break;
1840 
1841 	default:
1842 		break;
1843 	}
1844 }
1845 
1846 static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
1847 {
1848 	switch (c->x86_vendor) {
1849 	case X86_VENDOR_INTEL:
1850 		mce_intel_feature_clear(c);
1851 		break;
1852 
1853 	case X86_VENDOR_ZHAOXIN:
1854 		mce_zhaoxin_feature_clear(c);
1855 		break;
1856 
1857 	default:
1858 		break;
1859 	}
1860 }
1861 
1862 static void mce_start_timer(struct timer_list *t)
1863 {
1864 	unsigned long iv = check_interval * HZ;
1865 
1866 	if (mca_cfg.ignore_ce || !iv)
1867 		return;
1868 
1869 	this_cpu_write(mce_next_interval, iv);
1870 	__start_timer(t, iv);
1871 }
1872 
1873 static void __mcheck_cpu_setup_timer(void)
1874 {
1875 	struct timer_list *t = this_cpu_ptr(&mce_timer);
1876 
1877 	timer_setup(t, mce_timer_fn, TIMER_PINNED);
1878 }
1879 
1880 static void __mcheck_cpu_init_timer(void)
1881 {
1882 	struct timer_list *t = this_cpu_ptr(&mce_timer);
1883 
1884 	timer_setup(t, mce_timer_fn, TIMER_PINNED);
1885 	mce_start_timer(t);
1886 }
1887 
1888 bool filter_mce(struct mce *m)
1889 {
1890 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
1891 		return amd_filter_mce(m);
1892 
1893 	return false;
1894 }
1895 
1896 /* Handle unconfigured int18 (should never happen) */
1897 static void unexpected_machine_check(struct pt_regs *regs, long error_code)
1898 {
1899 	pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
1900 	       smp_processor_id());
1901 }
1902 
1903 /* Call the installed machine check handler for this CPU setup. */
1904 void (*machine_check_vector)(struct pt_regs *, long error_code) =
1905 						unexpected_machine_check;
1906 
1907 dotraplinkage void do_mce(struct pt_regs *regs, long error_code)
1908 {
1909 	machine_check_vector(regs, error_code);
1910 }
1911 
1912 /*
1913  * Called for each booted CPU to set up machine checks.
1914  * Must be called with preempt off:
1915  */
1916 void mcheck_cpu_init(struct cpuinfo_x86 *c)
1917 {
1918 	if (mca_cfg.disabled)
1919 		return;
1920 
1921 	if (__mcheck_cpu_ancient_init(c))
1922 		return;
1923 
1924 	if (!mce_available(c))
1925 		return;
1926 
1927 	__mcheck_cpu_cap_init();
1928 
1929 	if (__mcheck_cpu_apply_quirks(c) < 0) {
1930 		mca_cfg.disabled = 1;
1931 		return;
1932 	}
1933 
1934 	if (mce_gen_pool_init()) {
1935 		mca_cfg.disabled = 1;
1936 		pr_emerg("Couldn't allocate MCE records pool!\n");
1937 		return;
1938 	}
1939 
1940 	machine_check_vector = do_machine_check;
1941 
1942 	__mcheck_cpu_init_early(c);
1943 	__mcheck_cpu_init_generic();
1944 	__mcheck_cpu_init_vendor(c);
1945 	__mcheck_cpu_init_clear_banks();
1946 	__mcheck_cpu_check_banks();
1947 	__mcheck_cpu_setup_timer();
1948 }
1949 
1950 /*
1951  * Called for each booted CPU to clear some machine checks opt-ins
1952  */
1953 void mcheck_cpu_clear(struct cpuinfo_x86 *c)
1954 {
1955 	if (mca_cfg.disabled)
1956 		return;
1957 
1958 	if (!mce_available(c))
1959 		return;
1960 
1961 	/*
1962 	 * Possibly to clear general settings generic to x86
1963 	 * __mcheck_cpu_clear_generic(c);
1964 	 */
1965 	__mcheck_cpu_clear_vendor(c);
1966 
1967 }
1968 
1969 static void __mce_disable_bank(void *arg)
1970 {
1971 	int bank = *((int *)arg);
1972 	__clear_bit(bank, this_cpu_ptr(mce_poll_banks));
1973 	cmci_disable_bank(bank);
1974 }
1975 
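/*
 * Stop CMCI and polling for @bank on all CPUs. Used for banks whose
 * corrected errors are handled firmware-first (see mce_banks_ce_disabled).
 */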
1976 void mce_disable_bank(int bank)
1977 {
1978 	if (bank >= this_cpu_read(mce_num_banks)) {
1979 		pr_warn(FW_BUG
1980 			"Ignoring request to disable invalid MCA bank %d.\n",
1981 			bank);
1982 		return;
1983 	}
1984 	set_bit(bank, mce_banks_ce_disabled);
1985 	on_each_cpu(__mce_disable_bank, &bank, 1);
1986 }
1987 
1988 /*
1989  * mce=off Disables machine check
1990  * mce=no_cmci Disables CMCI
1991  * mce=no_lmce Disables LMCE
1992  * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
1993  * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
1994  * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
1995  *	monarchtimeout is how long to wait for other CPUs on machine
1996  *	check, or 0 to not wait
1997  * mce=bootlog Log MCEs from before booting. Disabled by default on AMD Fam10h
1998  *	and older.
1999  * mce=nobootlog Don't log MCEs from before booting.
2000  * mce=bios_cmci_threshold Don't program the CMCI threshold
2001  * mce=recovery Force-enable memcpy_mcsafe()
2002  */
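/*
 * The keywords above cannot be combined in one string; mcheck_enable()
 * matches exactly one of them (or the numeric tolerant[,monarchtimeout]
 * form) per "mce=" instance. Illustrative examples, values made up:
 *
 *	mce=off
 *	mce=2,500000	(tolerant=2, monarch_timeout=500000)
 */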
2003 static int __init mcheck_enable(char *str)
2004 {
2005 	struct mca_config *cfg = &mca_cfg;
2006 
2007 	if (*str == 0) {
2008 		enable_p5_mce();
2009 		return 1;
2010 	}
2011 	if (*str == '=')
2012 		str++;
2013 	if (!strcmp(str, "off"))
2014 		cfg->disabled = 1;
2015 	else if (!strcmp(str, "no_cmci"))
2016 		cfg->cmci_disabled = true;
2017 	else if (!strcmp(str, "no_lmce"))
2018 		cfg->lmce_disabled = 1;
2019 	else if (!strcmp(str, "dont_log_ce"))
2020 		cfg->dont_log_ce = true;
2021 	else if (!strcmp(str, "ignore_ce"))
2022 		cfg->ignore_ce = true;
2023 	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
2024 		cfg->bootlog = (str[0] == 'b');
2025 	else if (!strcmp(str, "bios_cmci_threshold"))
2026 		cfg->bios_cmci_threshold = 1;
2027 	else if (!strcmp(str, "recovery"))
2028 		cfg->recovery = 1;
2029 	else if (isdigit(str[0])) {
2030 		if (get_option(&str, &cfg->tolerant) == 2)
2031 			get_option(&str, &(cfg->monarch_timeout));
2032 	} else {
2033 		pr_info("mce argument %s ignored. Please use /sys\n", str);
2034 		return 0;
2035 	}
2036 	return 1;
2037 }
2038 __setup("mce", mcheck_enable);
2039 
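/*
 * Early boot init: register the MCE decode notifiers, vendor severity
 * handling and the deferred-processing work/irq_work items.
 */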
2040 int __init mcheck_init(void)
2041 {
2042 	mcheck_intel_therm_init();
2043 	mce_register_decode_chain(&first_nb);
2044 	mce_register_decode_chain(&mce_srao_nb);
2045 	mce_register_decode_chain(&mce_default_nb);
2046 	mcheck_vendor_init_severity();
2047 
2048 	INIT_WORK(&mce_work, mce_gen_pool_process);
2049 	init_irq_work(&mce_irq_work, mce_irq_work_cb);
2050 
2051 	return 0;
2052 }
2053 
2054 /*
2055  * mce_syscore: PM support
2056  */
2057 
2058 /*
2059  * Disable machine checks on suspend and shutdown. We can't really handle
2060  * them later.
2061  */
2062 static void mce_disable_error_reporting(void)
2063 {
2064 	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
2065 	int i;
2066 
2067 	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
2068 		struct mce_bank *b = &mce_banks[i];
2069 
2070 		if (b->init)
2071 			wrmsrl(msr_ops.ctl(i), 0);
2072 	}
2074 }
2075 
2076 static void vendor_disable_error_reporting(void)
2077 {
2078 	/*
2079 	 * Don't clear on Intel or AMD or Hygon or Zhaoxin CPUs. Some of these
2080 	 * MSRs are socket-wide. Disabling them for just a single offlined CPU
2081 	 * is bad, since it will inhibit reporting for all shared resources on
2082 	 * the socket like the last level cache (LLC), the integrated memory
2083 	 * controller (iMC), etc.
2084 	 */
2085 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
2086 	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ||
2087 	    boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
2088 	    boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN)
2089 		return;
2090 
2091 	mce_disable_error_reporting();
2092 }
2093 
2094 static int mce_syscore_suspend(void)
2095 {
2096 	vendor_disable_error_reporting();
2097 	return 0;
2098 }
2099 
2100 static void mce_syscore_shutdown(void)
2101 {
2102 	vendor_disable_error_reporting();
2103 }
2104 
2105 /*
2106  * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
2107  * Only one CPU is active at this time, the others get re-added later using
2108  * CPU hotplug:
2109  */
2110 static void mce_syscore_resume(void)
2111 {
2112 	__mcheck_cpu_init_generic();
2113 	__mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
2114 	__mcheck_cpu_init_clear_banks();
2115 }
2116 
2117 static struct syscore_ops mce_syscore_ops = {
2118 	.suspend	= mce_syscore_suspend,
2119 	.shutdown	= mce_syscore_shutdown,
2120 	.resume		= mce_syscore_resume,
2121 };
2122 
2123 /*
2124  * mce_device: Sysfs support
2125  */
2126 
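/* Per-CPU callback for mce_restart(), invoked via on_each_cpu(). */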
2127 static void mce_cpu_restart(void *data)
2128 {
2129 	if (!mce_available(raw_cpu_ptr(&cpu_info)))
2130 		return;
2131 	__mcheck_cpu_init_generic();
2132 	__mcheck_cpu_init_clear_banks();
2133 	__mcheck_cpu_init_timer();
2134 }
2135 
2136 /* Reinit machine check handling after user configuration changes */
2137 static void mce_restart(void)
2138 {
2139 	mce_timer_delete_all();
2140 	on_each_cpu(mce_cpu_restart, NULL, 1);
2141 }
2142 
2143 /* Toggle features for corrected errors */
2144 static void mce_disable_cmci(void *data)
2145 {
2146 	if (!mce_available(raw_cpu_ptr(&cpu_info)))
2147 		return;
2148 	cmci_clear();
2149 }
2150 
2151 static void mce_enable_ce(void *all)
2152 {
2153 	if (!mce_available(raw_cpu_ptr(&cpu_info)))
2154 		return;
2155 	cmci_reenable();
2156 	cmci_recheck();
2157 	if (all)
2158 		__mcheck_cpu_init_timer();
2159 }
2160 
2161 static struct bus_type mce_subsys = {
2162 	.name		= "machinecheck",
2163 	.dev_name	= "machinecheck",
2164 };
2165 
2166 DEFINE_PER_CPU(struct device *, mce_device);
2167 
2168 static inline struct mce_bank_dev *attr_to_bank(struct device_attribute *attr)
2169 {
2170 	return container_of(attr, struct mce_bank_dev, attr);
2171 }
2172 
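/*
 * show/store handlers for the per-bank "bankN" control files. Given the
 * "machinecheck" subsystem/device names used in this file, these should
 * appear under /sys/devices/system/machinecheck/machinecheck<cpu>/.
 */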
2173 static ssize_t show_bank(struct device *s, struct device_attribute *attr,
2174 			 char *buf)
2175 {
2176 	u8 bank = attr_to_bank(attr)->bank;
2177 	struct mce_bank *b;
2178 
2179 	if (bank >= per_cpu(mce_num_banks, s->id))
2180 		return -EINVAL;
2181 
2182 	b = &per_cpu(mce_banks_array, s->id)[bank];
2183 
2184 	if (!b->init)
2185 		return -ENODEV;
2186 
2187 	return sprintf(buf, "%llx\n", b->ctl);
2188 }
2189 
2190 static ssize_t set_bank(struct device *s, struct device_attribute *attr,
2191 			const char *buf, size_t size)
2192 {
2193 	u8 bank = attr_to_bank(attr)->bank;
2194 	struct mce_bank *b;
2195 	u64 new;
2196 
2197 	if (kstrtou64(buf, 0, &new) < 0)
2198 		return -EINVAL;
2199 
2200 	if (bank >= per_cpu(mce_num_banks, s->id))
2201 		return -EINVAL;
2202 
2203 	b = &per_cpu(mce_banks_array, s->id)[bank];
2204 
2205 	if (!b->init)
2206 		return -ENODEV;
2207 
2208 	b->ctl = new;
2209 	mce_restart();
2210 
2211 	return size;
2212 }
2213 
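/*
 * sysfs store for "ignore_ce": writing a non-zero value stops the poll
 * timer and CMCI on all CPUs, writing zero re-enables both. Illustrative
 * shell usage (path assumed from the device names in this file):
 *
 *	echo 1 > /sys/devices/system/machinecheck/machinecheck0/ignore_ce
 */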
2214 static ssize_t set_ignore_ce(struct device *s,
2215 			     struct device_attribute *attr,
2216 			     const char *buf, size_t size)
2217 {
2218 	u64 new;
2219 
2220 	if (kstrtou64(buf, 0, &new) < 0)
2221 		return -EINVAL;
2222 
2223 	mutex_lock(&mce_sysfs_mutex);
2224 	if (mca_cfg.ignore_ce ^ !!new) {
2225 		if (new) {
2226 			/* disable ce features */
2227 			mce_timer_delete_all();
2228 			on_each_cpu(mce_disable_cmci, NULL, 1);
2229 			mca_cfg.ignore_ce = true;
2230 		} else {
2231 			/* enable ce features */
2232 			mca_cfg.ignore_ce = false;
2233 			on_each_cpu(mce_enable_ce, (void *)1, 1);
2234 		}
2235 	}
2236 	mutex_unlock(&mce_sysfs_mutex);
2237 
2238 	return size;
2239 }
2240 
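/*
 * sysfs store for "cmci_disabled": toggles CMCI on all CPUs but, unlike
 * ignore_ce, leaves the polling timer alone.
 */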
2241 static ssize_t set_cmci_disabled(struct device *s,
2242 				 struct device_attribute *attr,
2243 				 const char *buf, size_t size)
2244 {
2245 	u64 new;
2246 
2247 	if (kstrtou64(buf, 0, &new) < 0)
2248 		return -EINVAL;
2249 
2250 	mutex_lock(&mce_sysfs_mutex);
2251 	if (mca_cfg.cmci_disabled ^ !!new) {
2252 		if (new) {
2253 			/* disable cmci */
2254 			on_each_cpu(mce_disable_cmci, NULL, 1);
2255 			mca_cfg.cmci_disabled = true;
2256 		} else {
2257 			/* enable cmci */
2258 			mca_cfg.cmci_disabled = false;
2259 			on_each_cpu(mce_enable_ce, NULL, 1);
2260 		}
2261 	}
2262 	mutex_unlock(&mce_sysfs_mutex);
2263 
2264 	return size;
2265 }
2266 
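/*
 * sysfs store for "check_interval": only do the on_each_cpu() restart
 * when the written value actually changed.
 */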
2267 static ssize_t store_int_with_restart(struct device *s,
2268 				      struct device_attribute *attr,
2269 				      const char *buf, size_t size)
2270 {
2271 	unsigned long old_check_interval = check_interval;
2272 	ssize_t ret = device_store_ulong(s, attr, buf, size);
2273 
2274 	if (check_interval == old_check_interval)
2275 		return ret;
2276 
2277 	mutex_lock(&mce_sysfs_mutex);
2278 	mce_restart();
2279 	mutex_unlock(&mce_sysfs_mutex);
2280 
2281 	return ret;
2282 }
2283 
2284 static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
2285 static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
2286 static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
2287 
2288 static struct dev_ext_attribute dev_attr_check_interval = {
2289 	__ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
2290 	&check_interval
2291 };
2292 
2293 static struct dev_ext_attribute dev_attr_ignore_ce = {
2294 	__ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
2295 	&mca_cfg.ignore_ce
2296 };
2297 
2298 static struct dev_ext_attribute dev_attr_cmci_disabled = {
2299 	__ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
2300 	&mca_cfg.cmci_disabled
2301 };
2302 
2303 static struct device_attribute *mce_device_attrs[] = {
2304 	&dev_attr_tolerant.attr,
2305 	&dev_attr_check_interval.attr,
2306 #ifdef CONFIG_X86_MCELOG_LEGACY
2307 	&dev_attr_trigger,
2308 #endif
2309 	&dev_attr_monarch_timeout.attr,
2310 	&dev_attr_dont_log_ce.attr,
2311 	&dev_attr_ignore_ce.attr,
2312 	&dev_attr_cmci_disabled.attr,
2313 	NULL
2314 };
2315 
2316 static cpumask_var_t mce_device_initialized;
2317 
2318 static void mce_device_release(struct device *dev)
2319 {
2320 	kfree(dev);
2321 }
2322 
2323 /* Per-CPU device init. All CPUs still share the same bank devices: */
2324 static int mce_device_create(unsigned int cpu)
2325 {
2326 	struct device *dev;
2327 	int err;
2328 	int i, j;
2329 
2330 	if (!mce_available(&boot_cpu_data))
2331 		return -EIO;
2332 
2333 	dev = per_cpu(mce_device, cpu);
2334 	if (dev)
2335 		return 0;
2336 
2337 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2338 	if (!dev)
2339 		return -ENOMEM;
2340 	dev->id  = cpu;
2341 	dev->bus = &mce_subsys;
2342 	dev->release = &mce_device_release;
2343 
2344 	err = device_register(dev);
2345 	if (err) {
2346 		put_device(dev);
2347 		return err;
2348 	}
2349 
2350 	for (i = 0; mce_device_attrs[i]; i++) {
2351 		err = device_create_file(dev, mce_device_attrs[i]);
2352 		if (err)
2353 			goto error;
2354 	}
2355 	for (j = 0; j < per_cpu(mce_num_banks, cpu); j++) {
2356 		err = device_create_file(dev, &mce_bank_devs[j].attr);
2357 		if (err)
2358 			goto error2;
2359 	}
2360 	cpumask_set_cpu(cpu, mce_device_initialized);
2361 	per_cpu(mce_device, cpu) = dev;
2362 
2363 	return 0;
2364 error2:
2365 	while (--j >= 0)
2366 		device_remove_file(dev, &mce_bank_devs[j].attr);
2367 error:
2368 	while (--i >= 0)
2369 		device_remove_file(dev, mce_device_attrs[i]);
2370 
2371 	device_unregister(dev);
2372 
2373 	return err;
2374 }
2375 
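/* Counterpart of mce_device_create(): remove the per-CPU sysfs device. */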
2376 static void mce_device_remove(unsigned int cpu)
2377 {
2378 	struct device *dev = per_cpu(mce_device, cpu);
2379 	int i;
2380 
2381 	if (!cpumask_test_cpu(cpu, mce_device_initialized))
2382 		return;
2383 
2384 	for (i = 0; mce_device_attrs[i]; i++)
2385 		device_remove_file(dev, mce_device_attrs[i]);
2386 
2387 	for (i = 0; i < per_cpu(mce_num_banks, cpu); i++)
2388 		device_remove_file(dev, &mce_bank_devs[i].attr);
2389 
2390 	device_unregister(dev);
2391 	cpumask_clear_cpu(cpu, mce_device_initialized);
2392 	per_cpu(mce_device, cpu) = NULL;
2393 }
2394 
2395 /* Make sure there are no machine checks on offlined CPUs. */
2396 static void mce_disable_cpu(void)
2397 {
2398 	if (!mce_available(raw_cpu_ptr(&cpu_info)))
2399 		return;
2400 
2401 	if (!cpuhp_tasks_frozen)
2402 		cmci_clear();
2403 
2404 	vendor_disable_error_reporting();
2405 }
2406 
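/*
 * Counterpart of mce_disable_cpu(): re-enable CMCI (outside the
 * suspend/resume path) and rewrite each initialized bank's control MSR.
 */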
2407 static void mce_reenable_cpu(void)
2408 {
2409 	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
2410 	int i;
2411 
2412 	if (!mce_available(raw_cpu_ptr(&cpu_info)))
2413 		return;
2414 
2415 	if (!cpuhp_tasks_frozen)
2416 		cmci_reenable();
2417 	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
2418 		struct mce_bank *b = &mce_banks[i];
2419 
2420 		if (b->init)
2421 			wrmsrl(msr_ops.ctl(i), b->ctl);
2422 	}
2423 }
2424 
2425 static int mce_cpu_dead(unsigned int cpu)
2426 {
2427 	mce_intel_hcpu_update(cpu);
2428 
2429 	/* intentionally ignoring frozen here */
2430 	if (!cpuhp_tasks_frozen)
2431 		cmci_rediscover();
2432 	return 0;
2433 }
2434 
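/*
 * CPU hotplug callbacks: mce_cpu_online() sets up the sysfs and threshold
 * devices and re-arms the banks and the poll timer; mce_cpu_pre_down()
 * undoes all of that before the CPU goes away.
 */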
2435 static int mce_cpu_online(unsigned int cpu)
2436 {
2437 	struct timer_list *t = this_cpu_ptr(&mce_timer);
2438 	int ret;
2439 
2440 	mce_device_create(cpu);
2441 
2442 	ret = mce_threshold_create_device(cpu);
2443 	if (ret) {
2444 		mce_device_remove(cpu);
2445 		return ret;
2446 	}
2447 	mce_reenable_cpu();
2448 	mce_start_timer(t);
2449 	return 0;
2450 }
2451 
2452 static int mce_cpu_pre_down(unsigned int cpu)
2453 {
2454 	struct timer_list *t = this_cpu_ptr(&mce_timer);
2455 
2456 	mce_disable_cpu();
2457 	del_timer_sync(t);
2458 	mce_threshold_remove_device(cpu);
2459 	mce_device_remove(cpu);
2460 	return 0;
2461 }
2462 
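/*
 * Initialize the "bank0".."bankN" sysfs attributes which all per-CPU
 * devices share.
 */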
2463 static __init void mce_init_banks(void)
2464 {
2465 	int i;
2466 
2467 	for (i = 0; i < MAX_NR_BANKS; i++) {
2468 		struct mce_bank_dev *b = &mce_bank_devs[i];
2469 		struct device_attribute *a = &b->attr;
2470 
2471 		b->bank = i;
2472 
2473 		sysfs_attr_init(&a->attr);
2474 		a->attr.name	= b->attrname;
2475 		snprintf(b->attrname, ATTR_LEN, "bank%d", i);
2476 
2477 		a->attr.mode	= 0644;
2478 		a->show		= show_bank;
2479 		a->store	= set_bank;
2480 	}
2481 }
2482 
2483 static __init int mcheck_init_device(void)
2484 {
2485 	int err;
2486 
2487 	/*
2488 	 * Check if we have a spare virtual bit. This will only become
2489 	 * a problem if/when we move beyond 5-level page tables.
2490 	 */
2491 	MAYBE_BUILD_BUG_ON(__VIRTUAL_MASK_SHIFT >= 63);
2492 
2493 	if (!mce_available(&boot_cpu_data)) {
2494 		err = -EIO;
2495 		goto err_out;
2496 	}
2497 
2498 	if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
2499 		err = -ENOMEM;
2500 		goto err_out;
2501 	}
2502 
2503 	mce_init_banks();
2504 
2505 	err = subsys_system_register(&mce_subsys, NULL);
2506 	if (err)
2507 		goto err_out_mem;
2508 
2509 	err = cpuhp_setup_state(CPUHP_X86_MCE_DEAD, "x86/mce:dead", NULL,
2510 				mce_cpu_dead);
2511 	if (err)
2512 		goto err_out_mem;
2513 
2514 	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online",
2515 				mce_cpu_online, mce_cpu_pre_down);
2516 	if (err < 0)
2517 		goto err_out_online;
2518 
2519 	register_syscore_ops(&mce_syscore_ops);
2520 
2521 	return 0;
2522 
2523 err_out_online:
2524 	cpuhp_remove_state(CPUHP_X86_MCE_DEAD);
2525 
2526 err_out_mem:
2527 	free_cpumask_var(mce_device_initialized);
2528 
2529 err_out:
2530 	pr_err("Unable to init MCE device (rc: %d)\n", err);
2531 
2532 	return err;
2533 }
2534 device_initcall_sync(mcheck_init_device);
2535 
2536 /*
2537  * Old style boot options parsing. Only for compatibility.
2538  */
2539 static int __init mcheck_disable(char *str)
2540 {
2541 	mca_cfg.disabled = 1;
2542 	return 1;
2543 }
2544 __setup("nomce", mcheck_disable);
2545 
2546 #ifdef CONFIG_DEBUG_FS
2547 struct dentry *mce_get_debugfs_dir(void)
2548 {
2549 	static struct dentry *dmce;
2550 
2551 	if (!dmce)
2552 		dmce = debugfs_create_dir("mce", NULL);
2553 
2554 	return dmce;
2555 }
2556 
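/*
 * Reset the global MCE rendezvous/bookkeeping state, used by the
 * fake_panic control below.
 */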
2557 static void mce_reset(void)
2558 {
2559 	cpu_missing = 0;
2560 	atomic_set(&mce_fake_panicked, 0);
2561 	atomic_set(&mce_executing, 0);
2562 	atomic_set(&mce_callin, 0);
2563 	atomic_set(&global_nwo, 0);
2564 }
2565 
2566 static int fake_panic_get(void *data, u64 *val)
2567 {
2568 	*val = fake_panic;
2569 	return 0;
2570 }
2571 
2572 static int fake_panic_set(void *data, u64 val)
2573 {
2574 	mce_reset();
2575 	fake_panic = val;
2576 	return 0;
2577 }
2578 
2579 DEFINE_DEBUGFS_ATTRIBUTE(fake_panic_fops, fake_panic_get, fake_panic_set,
2580 			 "%llu\n");
2581 
2582 static void __init mcheck_debugfs_init(void)
2583 {
2584 	struct dentry *dmce;
2585 
2586 	dmce = mce_get_debugfs_dir();
2587 	debugfs_create_file_unsafe("fake_panic", 0444, dmce, NULL,
2588 				   &fake_panic_fops);
2589 }
2590 #else
2591 static void __init mcheck_debugfs_init(void) { }
2592 #endif
2593 
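/*
 * Static key consulted by memcpy_mcsafe(); bumped in mcheck_late_init()
 * below when mca_cfg.recovery is set (e.g. via mce=recovery).
 */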
2594 DEFINE_STATIC_KEY_FALSE(mcsafe_key);
2595 EXPORT_SYMBOL_GPL(mcsafe_key);
2596 
2597 static int __init mcheck_late_init(void)
2598 {
2599 	if (mca_cfg.recovery)
2600 		static_branch_inc(&mcsafe_key);
2601 
2602 	mcheck_debugfs_init();
2603 	cec_init();
2604 
2605 	/*
2606 	 * Flush out everything that has been logged during early boot, now that
2607 	 * everything has been initialized (workqueues, decoders, ...).
2608 	 */
2609 	mce_schedule_work();
2610 
2611 	return 0;
2612 }
2613 late_initcall(mcheck_late_init);
2614