// SPDX-License-Identifier: GPL-2.0-only
/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 * Author: Andi Kleen
 */

#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/ratelimit.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/ras.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/export.h>
#include <linux/set_memory.h>
#include <linux/sync_core.h>
#include <linux/task_work.h>
#include <linux/hardirq.h>

#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/reboot.h>

#include "internal.h"

/* sysfs synchronization */
static DEFINE_MUTEX(mce_sysfs_mutex);

#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>

#define SPINUNIT 100	/* 100ns */

DEFINE_PER_CPU(unsigned, mce_exception_count);

DEFINE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);

struct mce_bank {
        u64     ctl;    /* subevents to enable */
        bool    init;   /* initialise bank? */
};
static DEFINE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array);

#define ATTR_LEN 16
/* One object for each MCE bank, shared by all CPUs */
struct mce_bank_dev {
        struct device_attribute attr;                   /* device attribute */
        char                    attrname[ATTR_LEN];     /* attribute name */
        u8                      bank;                   /* bank number */
};
static struct mce_bank_dev mce_bank_devs[MAX_NR_BANKS];

struct mce_vendor_flags mce_flags __read_mostly;

struct mca_config mca_cfg __read_mostly = {
        .bootlog  = -1,
        /*
         * Tolerant levels:
         * 0: always panic on uncorrected errors, log corrected errors
         * 1: panic or SIGBUS on uncorrected errors, log corrected errors
         * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
         * 3: never panic or SIGBUS, log all errors (for testing only)
         */
        .tolerant = 1,
        .monarch_timeout = -1
};
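
/*
 * Note: both defaults above can be overridden at boot, e.g. "mce=2,100000"
 * would select tolerant level 2 and a 100 ms monarch timeout (the timeout
 * value is in microseconds, see mcheck_enable() and mce_start() below).
 * The values here are illustrative only.
 */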

static DEFINE_PER_CPU(struct mce, mces_seen);
static unsigned long mce_need_notify;

/*
 * MCA banks polled by the period polling timer for corrected events.
 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
 */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
        [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

/*
 * MCA banks controlled through firmware first for corrected errors.
 * This is a global list of banks for which we won't enable CMCI and we
 * won't poll. Firmware controls these banks and is responsible for
 * reporting corrected errors through GHES. Uncorrected/recoverable
 * errors are still notified through a machine check.
 */
mce_banks_t mce_banks_ce_disabled;

static struct work_struct mce_work;
static struct irq_work mce_irq_work;

/*
 * CPU/chipset specific EDAC code can register a notifier call here to print
 * MCE errors in a human-readable form.
 */
BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);

/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
        memset(m, 0, sizeof(struct mce));
        m->cpu = m->extcpu = smp_processor_id();
        /* need the internal __ version to avoid deadlocks */
        m->time = __ktime_get_real_seconds();
        m->cpuvendor = boot_cpu_data.x86_vendor;
        m->cpuid = cpuid_eax(1);
        m->socketid = cpu_data(m->extcpu).phys_proc_id;
        m->apicid = cpu_data(m->extcpu).initial_apicid;
        m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP);
        m->ppin = cpu_data(m->extcpu).ppin;
        m->microcode = boot_cpu_data.microcode;
}

DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);

void mce_log(struct mce *m)
{
        if (!mce_gen_pool_add(m))
                irq_work_queue(&mce_irq_work);
}
EXPORT_SYMBOL_GPL(mce_log);

void mce_register_decode_chain(struct notifier_block *nb)
{
        if (WARN_ON(nb->priority < MCE_PRIO_LOWEST ||
                    nb->priority > MCE_PRIO_HIGHEST))
                return;

        blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_register_decode_chain);

void mce_unregister_decode_chain(struct notifier_block *nb)
{
        blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);

u32 mca_msr_reg(int bank, enum mca_msr reg)
{
        if (mce_flags.smca) {
                switch (reg) {
                case MCA_CTL:    return MSR_AMD64_SMCA_MCx_CTL(bank);
                case MCA_ADDR:   return MSR_AMD64_SMCA_MCx_ADDR(bank);
                case MCA_MISC:   return MSR_AMD64_SMCA_MCx_MISC(bank);
                case MCA_STATUS: return MSR_AMD64_SMCA_MCx_STATUS(bank);
                }
        }

        switch (reg) {
        case MCA_CTL:    return MSR_IA32_MCx_CTL(bank);
        case MCA_ADDR:   return MSR_IA32_MCx_ADDR(bank);
        case MCA_MISC:   return MSR_IA32_MCx_MISC(bank);
        case MCA_STATUS: return MSR_IA32_MCx_STATUS(bank);
        }

        return 0;
}
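
/*
 * For reference, the two register layouts this helper selects between
 * (MSR numbers per the Intel SDM / AMD PPR, given here as an illustration
 * only):
 *
 *   legacy MCA: IA32_MCi_CTL/STATUS/ADDR/MISC start at MSR 0x400 and are
 *               packed four per bank, e.g. MC1_STATUS is 0x405.
 *   SMCA:       the MCx_* registers live in the 0xC000_2000 range with a
 *               stride of 0x10 per bank, leaving room for the additional
 *               SMCA registers (CONFIG, IPID, SYND, ...).
 */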

static void __print_mce(struct mce *m)
{
        pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n",
                 m->extcpu,
                 (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""),
                 m->mcgstatus, m->bank, m->status);

        if (m->ip) {
                pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
                         !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
                         m->cs, m->ip);

                if (m->cs == __KERNEL_CS)
                        pr_cont("{%pS}", (void *)(unsigned long)m->ip);
                pr_cont("\n");
        }

        pr_emerg(HW_ERR "TSC %llx ", m->tsc);
        if (m->addr)
                pr_cont("ADDR %llx ", m->addr);
        if (m->misc)
                pr_cont("MISC %llx ", m->misc);
        if (m->ppin)
                pr_cont("PPIN %llx ", m->ppin);

        if (mce_flags.smca) {
                if (m->synd)
                        pr_cont("SYND %llx ", m->synd);
                if (m->ipid)
                        pr_cont("IPID %llx ", m->ipid);
        }

        pr_cont("\n");

        /*
         * Note this output is parsed by external tools and old fields
         * should not be changed.
         */
        pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
                 m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
                 m->microcode);
}

static void print_mce(struct mce *m)
{
        __print_mce(m);

        if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON)
                pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}

#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_panicked;

static int fake_panic;
static atomic_t mce_fake_panicked;

/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
        long timeout = PANIC_TIMEOUT*USEC_PER_SEC;

        preempt_disable();
        local_irq_enable();
        while (timeout-- > 0)
                udelay(1);
        if (panic_timeout == 0)
                panic_timeout = mca_cfg.panic_timeout;
        panic("Panicing machine check CPU died");
}

static noinstr void mce_panic(const char *msg, struct mce *final, char *exp)
{
        struct llist_node *pending;
        struct mce_evt_llist *l;
        int apei_err = 0;

        /*
         * Allow instrumentation around external facilities usage. Not that it
         * matters a whole lot since the machine is going to panic anyway.
         */
        instrumentation_begin();

        if (!fake_panic) {
                /*
                 * Make sure only one CPU runs in machine check panic
                 */
                if (atomic_inc_return(&mce_panicked) > 1)
                        wait_for_panic();
                barrier();

                bust_spinlocks(1);
                console_verbose();
        } else {
                /* Don't log too much for fake panic */
                if (atomic_inc_return(&mce_fake_panicked) > 1)
                        goto out;
        }
        pending = mce_gen_pool_prepare_records();
        /* First print corrected ones that are still unlogged */
        llist_for_each_entry(l, pending, llnode) {
                struct mce *m = &l->mce;
                if (!(m->status & MCI_STATUS_UC)) {
                        print_mce(m);
                        if (!apei_err)
                                apei_err = apei_write_mce(m);
                }
        }
        /* Now print uncorrected but with the final one last */
        llist_for_each_entry(l, pending, llnode) {
                struct mce *m = &l->mce;
                if (!(m->status & MCI_STATUS_UC))
                        continue;
                if (!final || mce_cmp(m, final)) {
                        print_mce(m);
                        if (!apei_err)
                                apei_err = apei_write_mce(m);
                }
        }
        if (final) {
                print_mce(final);
                if (!apei_err)
                        apei_err = apei_write_mce(final);
        }
        if (exp)
                pr_emerg(HW_ERR "Machine check: %s\n", exp);
        if (!fake_panic) {
                if (panic_timeout == 0)
                        panic_timeout = mca_cfg.panic_timeout;
                panic(msg);
        } else
                pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);

out:
        instrumentation_end();
}

/* Support code for software error injection */

static int msr_to_offset(u32 msr)
{
        unsigned bank = __this_cpu_read(injectm.bank);

        if (msr == mca_cfg.rip_msr)
                return offsetof(struct mce, ip);
        if (msr == mca_msr_reg(bank, MCA_STATUS))
                return offsetof(struct mce, status);
        if (msr == mca_msr_reg(bank, MCA_ADDR))
                return offsetof(struct mce, addr);
        if (msr == mca_msr_reg(bank, MCA_MISC))
                return offsetof(struct mce, misc);
        if (msr == MSR_IA32_MCG_STATUS)
                return offsetof(struct mce, mcgstatus);
        return -1;
}
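
/*
 * Rough sketch of how injection ties together: the error injection code
 * fills the per-CPU "injectm" above and sets injectm.finished; the
 * mce_rdmsrl()/mce_wrmsrl() wrappers below then redirect the handler's
 * MSR accesses into that struct via msr_to_offset(), so the machine
 * check paths can be exercised without touching real hardware.
 */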

void ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr)
{
        if (wrmsr) {
                pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
                         (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax,
                         regs->ip, (void *)regs->ip);
        } else {
                pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
                         (unsigned int)regs->cx, regs->ip, (void *)regs->ip);
        }

        show_stack_regs(regs);

        panic("MCA architectural violation!\n");

        while (true)
                cpu_relax();
}

/* MSR access wrappers used for error injection */
noinstr u64 mce_rdmsrl(u32 msr)
{
        DECLARE_ARGS(val, low, high);

        if (__this_cpu_read(injectm.finished)) {
                int offset;
                u64 ret;

                instrumentation_begin();

                offset = msr_to_offset(msr);
                if (offset < 0)
                        ret = 0;
                else
                        ret = *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);

                instrumentation_end();

                return ret;
        }

        /*
         * RDMSR on MCA MSRs should not fault. If they do, this is very much an
         * architectural violation and needs to be reported to hw vendor. Panic
         * the box to not allow any further progress.
         */
        asm volatile("1: rdmsr\n"
                     "2:\n"
                     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR_IN_MCE)
                     : EAX_EDX_RET(val, low, high) : "c" (msr));

        return EAX_EDX_VAL(val, low, high);
}

static noinstr void mce_wrmsrl(u32 msr, u64 v)
{
        u32 low, high;

        if (__this_cpu_read(injectm.finished)) {
                int offset;

                instrumentation_begin();

                offset = msr_to_offset(msr);
                if (offset >= 0)
                        *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;

                instrumentation_end();

                return;
        }

        low  = (u32)v;
        high = (u32)(v >> 32);

        /* See comment in mce_rdmsrl() */
        asm volatile("1: wrmsr\n"
                     "2:\n"
                     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR_IN_MCE)
                     : : "c" (msr), "a"(low), "d" (high) : "memory");
}

/*
 * Collect all global (w.r.t. this processor) status about this machine
 * check into our "mce" struct so that we can use it later to assess
 * the severity of the problem as we read per-bank specific details.
 */
static noinstr void mce_gather_info(struct mce *m, struct pt_regs *regs)
{
        /*
         * Enable instrumentation around mce_setup() which calls external
         * facilities.
         */
        instrumentation_begin();
        mce_setup(m);
        instrumentation_end();

        m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
        if (regs) {
                /*
                 * Get the address of the instruction at the time of
                 * the machine check error.
                 */
                if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
                        m->ip = regs->ip;
                        m->cs = regs->cs;

                        /*
                         * When in VM86 mode make the cs look like ring 3
                         * always. This is a lie, but it's better than passing
                         * the additional vm86 bit around everywhere.
                         */
                        if (v8086_mode(regs))
                                m->cs |= 3;
                }
                /* Use accurate RIP reporting if available. */
                if (mca_cfg.rip_msr)
                        m->ip = mce_rdmsrl(mca_cfg.rip_msr);
        }
}

int mce_available(struct cpuinfo_x86 *c)
{
        if (mca_cfg.disabled)
                return 0;
        return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static void mce_schedule_work(void)
{
        if (!mce_gen_pool_empty())
                schedule_work(&mce_work);
}

static void mce_irq_work_cb(struct irq_work *entry)
{
        mce_schedule_work();
}
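
/*
 * Reminder of the MCi_MISC address info layout the check below relies on
 * (see the SDM's UCR error description): bits 5:0 hold the LSB of the
 * recoverable address and bits 8:6 the address mode, where mode 2 means
 * a physical address. Anything else is rejected.
 */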

/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
int mce_usable_address(struct mce *m)
{
        if (!(m->status & MCI_STATUS_ADDRV))
                return 0;

        /* Checks after this one are Intel/Zhaoxin-specific: */
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL &&
            boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN)
                return 1;

        if (!(m->status & MCI_STATUS_MISCV))
                return 0;

        if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
                return 0;

        if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
                return 0;

        return 1;
}
EXPORT_SYMBOL_GPL(mce_usable_address);

bool mce_is_memory_error(struct mce *m)
{
        switch (m->cpuvendor) {
        case X86_VENDOR_AMD:
        case X86_VENDOR_HYGON:
                return amd_mce_is_memory_error(m);

        case X86_VENDOR_INTEL:
        case X86_VENDOR_ZHAOXIN:
                /*
                 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
                 *
                 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
                 * indicating a memory error. Bit 8 is used for indicating a
                 * cache hierarchy error. The combination of bit 2 and bit 3
                 * is used for indicating a `generic' cache hierarchy error
                 * But we can't just blindly check the above bits, because if
                 * bit 11 is set, then it is a bus/interconnect error - and
                 * either way the above bits just gives more detail on what
                 * bus/interconnect error happened. Note that bit 12 can be
                 * ignored, as it's the "filter" bit.
                 */
                return (m->status & 0xef80) == BIT(7) ||
                       (m->status & 0xef00) == BIT(8) ||
                       (m->status & 0xeffc) == 0xc;

        default:
                return false;
        }
}
EXPORT_SYMBOL_GPL(mce_is_memory_error);

static bool whole_page(struct mce *m)
{
        if (!mca_cfg.ser || !(m->status & MCI_STATUS_MISCV))
                return true;

        return MCI_MISC_ADDR_LSB(m->misc) >= PAGE_SHIFT;
}

bool mce_is_correctable(struct mce *m)
{
        if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
                return false;

        if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED)
                return false;

        if (m->status & MCI_STATUS_UC)
                return false;

        return true;
}
EXPORT_SYMBOL_GPL(mce_is_correctable);

static int mce_early_notifier(struct notifier_block *nb, unsigned long val,
                              void *data)
{
        struct mce *m = (struct mce *)data;

        if (!m)
                return NOTIFY_DONE;

        /* Emit the trace record: */
        trace_mce_record(m);

        set_bit(0, &mce_need_notify);

        mce_notify_irq();

        return NOTIFY_DONE;
}

static struct notifier_block early_nb = {
        .notifier_call  = mce_early_notifier,
        .priority       = MCE_PRIO_EARLY,
};

static int uc_decode_notifier(struct notifier_block *nb, unsigned long val,
                              void *data)
{
        struct mce *mce = (struct mce *)data;
        unsigned long pfn;

        if (!mce || !mce_usable_address(mce))
                return NOTIFY_DONE;

        if (mce->severity != MCE_AO_SEVERITY &&
            mce->severity != MCE_DEFERRED_SEVERITY)
                return NOTIFY_DONE;

        pfn = mce->addr >> PAGE_SHIFT;
        if (!memory_failure(pfn, 0)) {
                set_mce_nospec(pfn, whole_page(mce));
                mce->kflags |= MCE_HANDLED_UC;
        }

        return NOTIFY_OK;
}

static struct notifier_block mce_uc_nb = {
        .notifier_call  = uc_decode_notifier,
        .priority       = MCE_PRIO_UC,
};
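
/*
 * Ordering note: the blocking notifier chain runs callbacks in descending
 * priority, so a logged MCE is seen roughly as: early notifier above
 * (tracepoint + /dev/mcelog wakeup), then the uncorrected/poisoning
 * notifier, then EDAC/mcelog style decoders, and finally the default
 * notifier below, which only prints records nobody marked as handled
 * (unless mce=print_all is set).
 */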

static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
                                void *data)
{
        struct mce *m = (struct mce *)data;

        if (!m)
                return NOTIFY_DONE;

        if (mca_cfg.print_all || !m->kflags)
                __print_mce(m);

        return NOTIFY_DONE;
}

static struct notifier_block mce_default_nb = {
        .notifier_call  = mce_default_notifier,
        /* lowest prio, we want it to run last. */
        .priority       = MCE_PRIO_LOWEST,
};

/*
 * Read ADDR and MISC registers.
 */
static noinstr void mce_read_aux(struct mce *m, int i)
{
        if (m->status & MCI_STATUS_MISCV)
                m->misc = mce_rdmsrl(mca_msr_reg(i, MCA_MISC));

        if (m->status & MCI_STATUS_ADDRV) {
                m->addr = mce_rdmsrl(mca_msr_reg(i, MCA_ADDR));

                /*
                 * Mask the reported address by the reported granularity.
                 */
                if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
                        u8 shift = MCI_MISC_ADDR_LSB(m->misc);
                        m->addr >>= shift;
                        m->addr <<= shift;
                }

                /*
                 * Extract [55:<lsb>] where lsb is the least significant
                 * *valid* bit of the address bits.
                 */
                if (mce_flags.smca) {
                        u8 lsb = (m->addr >> 56) & 0x3f;

                        m->addr &= GENMASK_ULL(55, lsb);
                }
        }

        if (mce_flags.smca) {
                m->ipid = mce_rdmsrl(MSR_AMD64_SMCA_MCx_IPID(i));

                if (m->status & MCI_STATUS_SYNDV)
                        m->synd = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND(i));
        }
}

DEFINE_PER_CPU(unsigned, mce_poll_count);

/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: spec recommends to panic for fatal unsignalled
 * errors here. However this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between exception handler
 * and poll handler -- so we skip this for now.
 * These cases should not happen anyways, or only when the CPU
 * is already totally confused. In this case it's likely it will
 * not fully execute the machine check handler either.
 */
bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
        bool error_seen = false;
        struct mce m;
        int i;

        this_cpu_inc(mce_poll_count);

        mce_gather_info(&m, NULL);

        if (flags & MCP_TIMESTAMP)
                m.tsc = rdtsc();

        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
                if (!mce_banks[i].ctl || !test_bit(i, *b))
                        continue;

                m.misc = 0;
                m.addr = 0;
                m.bank = i;

                barrier();
                m.status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));

                /* If this entry is not valid, ignore it */
                if (!(m.status & MCI_STATUS_VAL))
                        continue;

                /*
                 * If we are logging everything (at CPU online) or this
                 * is a corrected error, then we must log it.
                 */
                if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC))
                        goto log_it;

                /*
                 * Newer Intel systems that support software error
                 * recovery need to make additional checks. Other
                 * CPUs should skip over uncorrected errors, but log
                 * everything else.
                 */
                if (!mca_cfg.ser) {
                        if (m.status & MCI_STATUS_UC)
                                continue;
                        goto log_it;
                }

                /* Log "not enabled" (speculative) errors */
                if (!(m.status & MCI_STATUS_EN))
                        goto log_it;

                /*
                 * Log UCNA (SDM: 15.6.3 "UCR Error Classification")
                 * UC == 1 && PCC == 0 && S == 0
                 */
                if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S))
                        goto log_it;

                /*
                 * Skip anything else.
Presumption is that our read of this 760 * bank is racing with a machine check. Leave the log alone 761 * for do_machine_check() to deal with it. 762 */ 763 continue; 764 765 log_it: 766 error_seen = true; 767 768 if (flags & MCP_DONTLOG) 769 goto clear_it; 770 771 mce_read_aux(&m, i); 772 m.severity = mce_severity(&m, NULL, mca_cfg.tolerant, NULL, false); 773 /* 774 * Don't get the IP here because it's unlikely to 775 * have anything to do with the actual error location. 776 */ 777 778 if (mca_cfg.dont_log_ce && !mce_usable_address(&m)) 779 goto clear_it; 780 781 if (flags & MCP_QUEUE_LOG) 782 mce_gen_pool_add(&m); 783 else 784 mce_log(&m); 785 786 clear_it: 787 /* 788 * Clear state for this bank. 789 */ 790 mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); 791 } 792 793 /* 794 * Don't clear MCG_STATUS here because it's only defined for 795 * exceptions. 796 */ 797 798 sync_core(); 799 800 return error_seen; 801 } 802 EXPORT_SYMBOL_GPL(machine_check_poll); 803 804 /* 805 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and 806 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM 807 * Vol 3B Table 15-20). But this confuses both the code that determines 808 * whether the machine check occurred in kernel or user mode, and also 809 * the severity assessment code. Pretend that EIPV was set, and take the 810 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier. 811 */ 812 static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs) 813 { 814 if (bank != 0) 815 return; 816 if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0) 817 return; 818 if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC| 819 MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV| 820 MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR| 821 MCACOD)) != 822 (MCI_STATUS_UC|MCI_STATUS_EN| 823 MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S| 824 MCI_STATUS_AR|MCACOD_INSTR)) 825 return; 826 827 m->mcgstatus |= MCG_STATUS_EIPV; 828 m->ip = regs->ip; 829 m->cs = regs->cs; 830 } 831 832 /* 833 * Do a quick check if any of the events requires a panic. 834 * This decides if we keep the events around or clear them. 835 */ 836 static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, 837 struct pt_regs *regs) 838 { 839 char *tmp = *msg; 840 int i; 841 842 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { 843 m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS)); 844 if (!(m->status & MCI_STATUS_VAL)) 845 continue; 846 847 __set_bit(i, validp); 848 if (mce_flags.snb_ifu_quirk) 849 quirk_sandybridge_ifu(i, m, regs); 850 851 m->bank = i; 852 if (mce_severity(m, regs, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { 853 mce_read_aux(m, i); 854 *msg = tmp; 855 return 1; 856 } 857 } 858 return 0; 859 } 860 861 /* 862 * Variable to establish order between CPUs while scanning. 863 * Each CPU spins initially until executing is equal its number. 864 */ 865 static atomic_t mce_executing; 866 867 /* 868 * Defines order of CPUs on entry. First CPU becomes Monarch. 869 */ 870 static atomic_t mce_callin; 871 872 /* 873 * Track which CPUs entered the MCA broadcast synchronization and which not in 874 * order to print holdouts. 875 */ 876 static cpumask_t mce_missing_cpus = CPU_MASK_ALL; 877 878 /* 879 * Check if a timeout waiting for other CPUs happened. 
880 */ 881 static noinstr int mce_timed_out(u64 *t, const char *msg) 882 { 883 int ret = 0; 884 885 /* Enable instrumentation around calls to external facilities */ 886 instrumentation_begin(); 887 888 /* 889 * The others already did panic for some reason. 890 * Bail out like in a timeout. 891 * rmb() to tell the compiler that system_state 892 * might have been modified by someone else. 893 */ 894 rmb(); 895 if (atomic_read(&mce_panicked)) 896 wait_for_panic(); 897 if (!mca_cfg.monarch_timeout) 898 goto out; 899 if ((s64)*t < SPINUNIT) { 900 if (mca_cfg.tolerant <= 1) { 901 if (cpumask_and(&mce_missing_cpus, cpu_online_mask, &mce_missing_cpus)) 902 pr_emerg("CPUs not responding to MCE broadcast (may include false positives): %*pbl\n", 903 cpumask_pr_args(&mce_missing_cpus)); 904 mce_panic(msg, NULL, NULL); 905 } 906 ret = 1; 907 goto out; 908 } 909 *t -= SPINUNIT; 910 911 out: 912 touch_nmi_watchdog(); 913 914 instrumentation_end(); 915 916 return ret; 917 } 918 919 /* 920 * The Monarch's reign. The Monarch is the CPU who entered 921 * the machine check handler first. It waits for the others to 922 * raise the exception too and then grades them. When any 923 * error is fatal panic. Only then let the others continue. 924 * 925 * The other CPUs entering the MCE handler will be controlled by the 926 * Monarch. They are called Subjects. 927 * 928 * This way we prevent any potential data corruption in a unrecoverable case 929 * and also makes sure always all CPU's errors are examined. 930 * 931 * Also this detects the case of a machine check event coming from outer 932 * space (not detected by any CPUs) In this case some external agent wants 933 * us to shut down, so panic too. 934 * 935 * The other CPUs might still decide to panic if the handler happens 936 * in a unrecoverable place, but in this case the system is in a semi-stable 937 * state and won't corrupt anything by itself. It's ok to let the others 938 * continue for a bit first. 939 * 940 * All the spin loops have timeouts; when a timeout happens a CPU 941 * typically elects itself to be Monarch. 942 */ 943 static void mce_reign(void) 944 { 945 int cpu; 946 struct mce *m = NULL; 947 int global_worst = 0; 948 char *msg = NULL; 949 950 /* 951 * This CPU is the Monarch and the other CPUs have run 952 * through their handlers. 953 * Grade the severity of the errors of all the CPUs. 954 */ 955 for_each_possible_cpu(cpu) { 956 struct mce *mtmp = &per_cpu(mces_seen, cpu); 957 958 if (mtmp->severity > global_worst) { 959 global_worst = mtmp->severity; 960 m = &per_cpu(mces_seen, cpu); 961 } 962 } 963 964 /* 965 * Cannot recover? Panic here then. 966 * This dumps all the mces in the log buffer and stops the 967 * other CPUs. 968 */ 969 if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) { 970 /* call mce_severity() to get "msg" for panic */ 971 mce_severity(m, NULL, mca_cfg.tolerant, &msg, true); 972 mce_panic("Fatal machine check", m, msg); 973 } 974 975 /* 976 * For UC somewhere we let the CPU who detects it handle it. 977 * Also must let continue the others, otherwise the handling 978 * CPU could deadlock on a lock. 979 */ 980 981 /* 982 * No machine check event found. Must be some external 983 * source or one CPU is hung. Panic. 984 */ 985 if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3) 986 mce_panic("Fatal machine check from unknown source", NULL, NULL); 987 988 /* 989 * Now clear all the mces_seen so that they don't reappear on 990 * the next mce. 
991 */ 992 for_each_possible_cpu(cpu) 993 memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce)); 994 } 995 996 static atomic_t global_nwo; 997 998 /* 999 * Start of Monarch synchronization. This waits until all CPUs have 1000 * entered the exception handler and then determines if any of them 1001 * saw a fatal event that requires panic. Then it executes them 1002 * in the entry order. 1003 * TBD double check parallel CPU hotunplug 1004 */ 1005 static noinstr int mce_start(int *no_way_out) 1006 { 1007 u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC; 1008 int order, ret = -1; 1009 1010 if (!timeout) 1011 return ret; 1012 1013 atomic_add(*no_way_out, &global_nwo); 1014 /* 1015 * Rely on the implied barrier below, such that global_nwo 1016 * is updated before mce_callin. 1017 */ 1018 order = atomic_inc_return(&mce_callin); 1019 cpumask_clear_cpu(smp_processor_id(), &mce_missing_cpus); 1020 1021 /* Enable instrumentation around calls to external facilities */ 1022 instrumentation_begin(); 1023 1024 /* 1025 * Wait for everyone. 1026 */ 1027 while (atomic_read(&mce_callin) != num_online_cpus()) { 1028 if (mce_timed_out(&timeout, 1029 "Timeout: Not all CPUs entered broadcast exception handler")) { 1030 atomic_set(&global_nwo, 0); 1031 goto out; 1032 } 1033 ndelay(SPINUNIT); 1034 } 1035 1036 /* 1037 * mce_callin should be read before global_nwo 1038 */ 1039 smp_rmb(); 1040 1041 if (order == 1) { 1042 /* 1043 * Monarch: Starts executing now, the others wait. 1044 */ 1045 atomic_set(&mce_executing, 1); 1046 } else { 1047 /* 1048 * Subject: Now start the scanning loop one by one in 1049 * the original callin order. 1050 * This way when there are any shared banks it will be 1051 * only seen by one CPU before cleared, avoiding duplicates. 1052 */ 1053 while (atomic_read(&mce_executing) < order) { 1054 if (mce_timed_out(&timeout, 1055 "Timeout: Subject CPUs unable to finish machine check processing")) { 1056 atomic_set(&global_nwo, 0); 1057 goto out; 1058 } 1059 ndelay(SPINUNIT); 1060 } 1061 } 1062 1063 /* 1064 * Cache the global no_way_out state. 1065 */ 1066 *no_way_out = atomic_read(&global_nwo); 1067 1068 ret = order; 1069 1070 out: 1071 instrumentation_end(); 1072 1073 return ret; 1074 } 1075 1076 /* 1077 * Synchronize between CPUs after main scanning loop. 1078 * This invokes the bulk of the Monarch processing. 1079 */ 1080 static noinstr int mce_end(int order) 1081 { 1082 u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC; 1083 int ret = -1; 1084 1085 /* Allow instrumentation around external facilities. */ 1086 instrumentation_begin(); 1087 1088 if (!timeout) 1089 goto reset; 1090 if (order < 0) 1091 goto reset; 1092 1093 /* 1094 * Allow others to run. 1095 */ 1096 atomic_inc(&mce_executing); 1097 1098 if (order == 1) { 1099 /* 1100 * Monarch: Wait for everyone to go through their scanning 1101 * loops. 1102 */ 1103 while (atomic_read(&mce_executing) <= num_online_cpus()) { 1104 if (mce_timed_out(&timeout, 1105 "Timeout: Monarch CPU unable to finish machine check processing")) 1106 goto reset; 1107 ndelay(SPINUNIT); 1108 } 1109 1110 mce_reign(); 1111 barrier(); 1112 ret = 0; 1113 } else { 1114 /* 1115 * Subject: Wait for Monarch to finish. 1116 */ 1117 while (atomic_read(&mce_executing) != 0) { 1118 if (mce_timed_out(&timeout, 1119 "Timeout: Monarch CPU did not finish machine check processing")) 1120 goto reset; 1121 ndelay(SPINUNIT); 1122 } 1123 1124 /* 1125 * Don't reset anything. That's done by the Monarch. 
1126 */ 1127 ret = 0; 1128 goto out; 1129 } 1130 1131 /* 1132 * Reset all global state. 1133 */ 1134 reset: 1135 atomic_set(&global_nwo, 0); 1136 atomic_set(&mce_callin, 0); 1137 cpumask_setall(&mce_missing_cpus); 1138 barrier(); 1139 1140 /* 1141 * Let others run again. 1142 */ 1143 atomic_set(&mce_executing, 0); 1144 1145 out: 1146 instrumentation_end(); 1147 1148 return ret; 1149 } 1150 1151 static void mce_clear_state(unsigned long *toclear) 1152 { 1153 int i; 1154 1155 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { 1156 if (test_bit(i, toclear)) 1157 mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); 1158 } 1159 } 1160 1161 /* 1162 * Cases where we avoid rendezvous handler timeout: 1163 * 1) If this CPU is offline. 1164 * 1165 * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to 1166 * skip those CPUs which remain looping in the 1st kernel - see 1167 * crash_nmi_callback(). 1168 * 1169 * Note: there still is a small window between kexec-ing and the new, 1170 * kdump kernel establishing a new #MC handler where a broadcasted MCE 1171 * might not get handled properly. 1172 */ 1173 static noinstr bool mce_check_crashing_cpu(void) 1174 { 1175 unsigned int cpu = smp_processor_id(); 1176 1177 if (arch_cpu_is_offline(cpu) || 1178 (crashing_cpu != -1 && crashing_cpu != cpu)) { 1179 u64 mcgstatus; 1180 1181 mcgstatus = __rdmsr(MSR_IA32_MCG_STATUS); 1182 1183 if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { 1184 if (mcgstatus & MCG_STATUS_LMCES) 1185 return false; 1186 } 1187 1188 if (mcgstatus & MCG_STATUS_RIPV) { 1189 __wrmsr(MSR_IA32_MCG_STATUS, 0, 0); 1190 return true; 1191 } 1192 } 1193 return false; 1194 } 1195 1196 static __always_inline int 1197 __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *final, 1198 unsigned long *toclear, unsigned long *valid_banks, int no_way_out, 1199 int *worst) 1200 { 1201 struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 1202 struct mca_config *cfg = &mca_cfg; 1203 int severity, i, taint = 0; 1204 1205 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { 1206 __clear_bit(i, toclear); 1207 if (!test_bit(i, valid_banks)) 1208 continue; 1209 1210 if (!mce_banks[i].ctl) 1211 continue; 1212 1213 m->misc = 0; 1214 m->addr = 0; 1215 m->bank = i; 1216 1217 m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS)); 1218 if (!(m->status & MCI_STATUS_VAL)) 1219 continue; 1220 1221 /* 1222 * Corrected or non-signaled errors are handled by 1223 * machine_check_poll(). Leave them alone, unless this panics. 1224 */ 1225 if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) && 1226 !no_way_out) 1227 continue; 1228 1229 /* Set taint even when machine check was not enabled. */ 1230 taint++; 1231 1232 severity = mce_severity(m, regs, cfg->tolerant, NULL, true); 1233 1234 /* 1235 * When machine check was for corrected/deferred handler don't 1236 * touch, unless we're panicking. 1237 */ 1238 if ((severity == MCE_KEEP_SEVERITY || 1239 severity == MCE_UCNA_SEVERITY) && !no_way_out) 1240 continue; 1241 1242 __set_bit(i, toclear); 1243 1244 /* Machine check event was not enabled. Clear, but ignore. */ 1245 if (severity == MCE_NO_SEVERITY) 1246 continue; 1247 1248 mce_read_aux(m, i); 1249 1250 /* assuming valid severity level != 0 */ 1251 m->severity = severity; 1252 1253 /* 1254 * Enable instrumentation around the mce_log() call which is 1255 * done in #MC context, where instrumentation is disabled. 
1256 */ 1257 instrumentation_begin(); 1258 mce_log(m); 1259 instrumentation_end(); 1260 1261 if (severity > *worst) { 1262 *final = *m; 1263 *worst = severity; 1264 } 1265 } 1266 1267 /* mce_clear_state will clear *final, save locally for use later */ 1268 *m = *final; 1269 1270 return taint; 1271 } 1272 1273 static void kill_me_now(struct callback_head *ch) 1274 { 1275 struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me); 1276 1277 p->mce_count = 0; 1278 force_sig(SIGBUS); 1279 } 1280 1281 static void kill_me_maybe(struct callback_head *cb) 1282 { 1283 struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me); 1284 int flags = MF_ACTION_REQUIRED; 1285 int ret; 1286 1287 p->mce_count = 0; 1288 pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr); 1289 1290 if (!p->mce_ripv) 1291 flags |= MF_MUST_KILL; 1292 1293 ret = memory_failure(p->mce_addr >> PAGE_SHIFT, flags); 1294 if (!ret) { 1295 set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page); 1296 sync_core(); 1297 return; 1298 } 1299 1300 /* 1301 * -EHWPOISON from memory_failure() means that it already sent SIGBUS 1302 * to the current process with the proper error info, so no need to 1303 * send SIGBUS here again. 1304 */ 1305 if (ret == -EHWPOISON) 1306 return; 1307 1308 pr_err("Memory error not recovered"); 1309 kill_me_now(cb); 1310 } 1311 1312 static void kill_me_never(struct callback_head *cb) 1313 { 1314 struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me); 1315 1316 p->mce_count = 0; 1317 pr_err("Kernel accessed poison in user space at %llx\n", p->mce_addr); 1318 if (!memory_failure(p->mce_addr >> PAGE_SHIFT, 0)) 1319 set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page); 1320 } 1321 1322 static void queue_task_work(struct mce *m, char *msg, void (*func)(struct callback_head *)) 1323 { 1324 int count = ++current->mce_count; 1325 1326 /* First call, save all the details */ 1327 if (count == 1) { 1328 current->mce_addr = m->addr; 1329 current->mce_kflags = m->kflags; 1330 current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV); 1331 current->mce_whole_page = whole_page(m); 1332 current->mce_kill_me.func = func; 1333 } 1334 1335 /* Ten is likely overkill. Don't expect more than two faults before task_work() */ 1336 if (count > 10) 1337 mce_panic("Too many consecutive machine checks while accessing user data", m, msg); 1338 1339 /* Second or later call, make sure page address matches the one from first call */ 1340 if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT)) 1341 mce_panic("Consecutive machine checks to different user pages", m, msg); 1342 1343 /* Do not call task_work_add() more than once */ 1344 if (count > 1) 1345 return; 1346 1347 task_work_add(current, ¤t->mce_kill_me, TWA_RESUME); 1348 } 1349 1350 /* Handle unconfigured int18 (should never happen) */ 1351 static noinstr void unexpected_machine_check(struct pt_regs *regs) 1352 { 1353 instrumentation_begin(); 1354 pr_err("CPU#%d: Unexpected int18 (Machine Check)\n", 1355 smp_processor_id()); 1356 instrumentation_end(); 1357 } 1358 1359 /* 1360 * The actual machine check handler. This only handles real exceptions when 1361 * something got corrupted coming in through int 18. 1362 * 1363 * This is executed in #MC context not subject to normal locking rules. 1364 * This implies that most kernel services cannot be safely used. Don't even 1365 * think about putting a printk in there! 
1366 * 1367 * On Intel systems this is entered on all CPUs in parallel through 1368 * MCE broadcast. However some CPUs might be broken beyond repair, 1369 * so be always careful when synchronizing with others. 1370 * 1371 * Tracing and kprobes are disabled: if we interrupted a kernel context 1372 * with IF=1, we need to minimize stack usage. There are also recursion 1373 * issues: if the machine check was due to a failure of the memory 1374 * backing the user stack, tracing that reads the user stack will cause 1375 * potentially infinite recursion. 1376 * 1377 * Currently, the #MC handler calls out to a number of external facilities 1378 * and, therefore, allows instrumentation around them. The optimal thing to 1379 * have would be to do the absolutely minimal work required in #MC context 1380 * and have instrumentation disabled only around that. Further processing can 1381 * then happen in process context where instrumentation is allowed. Achieving 1382 * that requires careful auditing and modifications. Until then, the code 1383 * allows instrumentation temporarily, where required. * 1384 */ 1385 noinstr void do_machine_check(struct pt_regs *regs) 1386 { 1387 int worst = 0, order, no_way_out, kill_current_task, lmce, taint = 0; 1388 DECLARE_BITMAP(valid_banks, MAX_NR_BANKS) = { 0 }; 1389 DECLARE_BITMAP(toclear, MAX_NR_BANKS) = { 0 }; 1390 struct mca_config *cfg = &mca_cfg; 1391 struct mce m, *final; 1392 char *msg = NULL; 1393 1394 if (unlikely(mce_flags.p5)) 1395 return pentium_machine_check(regs); 1396 else if (unlikely(mce_flags.winchip)) 1397 return winchip_machine_check(regs); 1398 else if (unlikely(!mca_cfg.initialized)) 1399 return unexpected_machine_check(regs); 1400 1401 /* 1402 * Establish sequential order between the CPUs entering the machine 1403 * check handler. 1404 */ 1405 order = -1; 1406 1407 /* 1408 * If no_way_out gets set, there is no safe way to recover from this 1409 * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway. 1410 */ 1411 no_way_out = 0; 1412 1413 /* 1414 * If kill_current_task is not set, there might be a way to recover from this 1415 * error. 1416 */ 1417 kill_current_task = 0; 1418 1419 /* 1420 * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES 1421 * on Intel. 1422 */ 1423 lmce = 1; 1424 1425 this_cpu_inc(mce_exception_count); 1426 1427 mce_gather_info(&m, regs); 1428 m.tsc = rdtsc(); 1429 1430 final = this_cpu_ptr(&mces_seen); 1431 *final = m; 1432 1433 no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs); 1434 1435 barrier(); 1436 1437 /* 1438 * When no restart IP might need to kill or panic. 1439 * Assume the worst for now, but if we find the 1440 * severity is MCE_AR_SEVERITY we have other options. 1441 */ 1442 if (!(m.mcgstatus & MCG_STATUS_RIPV)) 1443 kill_current_task = (cfg->tolerant == 3) ? 0 : 1; 1444 /* 1445 * Check if this MCE is signaled to only this logical processor, 1446 * on Intel, Zhaoxin only. 1447 */ 1448 if (m.cpuvendor == X86_VENDOR_INTEL || 1449 m.cpuvendor == X86_VENDOR_ZHAOXIN) 1450 lmce = m.mcgstatus & MCG_STATUS_LMCES; 1451 1452 /* 1453 * Local machine check may already know that we have to panic. 1454 * Broadcast machine check begins rendezvous in mce_start() 1455 * Go through all banks in exclusion of the other CPUs. This way we 1456 * don't report duplicated events on shared banks because the first one 1457 * to see it will clear it. 
1458 */ 1459 if (lmce) { 1460 if (no_way_out && cfg->tolerant < 3) 1461 mce_panic("Fatal local machine check", &m, msg); 1462 } else { 1463 order = mce_start(&no_way_out); 1464 } 1465 1466 taint = __mc_scan_banks(&m, regs, final, toclear, valid_banks, no_way_out, &worst); 1467 1468 if (!no_way_out) 1469 mce_clear_state(toclear); 1470 1471 /* 1472 * Do most of the synchronization with other CPUs. 1473 * When there's any problem use only local no_way_out state. 1474 */ 1475 if (!lmce) { 1476 if (mce_end(order) < 0) { 1477 if (!no_way_out) 1478 no_way_out = worst >= MCE_PANIC_SEVERITY; 1479 1480 if (no_way_out && cfg->tolerant < 3) 1481 mce_panic("Fatal machine check on current CPU", &m, msg); 1482 } 1483 } else { 1484 /* 1485 * If there was a fatal machine check we should have 1486 * already called mce_panic earlier in this function. 1487 * Since we re-read the banks, we might have found 1488 * something new. Check again to see if we found a 1489 * fatal error. We call "mce_severity()" again to 1490 * make sure we have the right "msg". 1491 */ 1492 if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) { 1493 mce_severity(&m, regs, cfg->tolerant, &msg, true); 1494 mce_panic("Local fatal machine check!", &m, msg); 1495 } 1496 } 1497 1498 /* 1499 * Enable instrumentation around the external facilities like task_work_add() 1500 * (via queue_task_work()), fixup_exception() etc. For now, that is. Fixing this 1501 * properly would need a lot more involved reorganization. 1502 */ 1503 instrumentation_begin(); 1504 1505 if (taint) 1506 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); 1507 1508 if (worst != MCE_AR_SEVERITY && !kill_current_task) 1509 goto out; 1510 1511 /* Fault was in user mode and we need to take some action */ 1512 if ((m.cs & 3) == 3) { 1513 /* If this triggers there is no way to recover. Die hard. */ 1514 BUG_ON(!on_thread_stack() || !user_mode(regs)); 1515 1516 if (kill_current_task) 1517 queue_task_work(&m, msg, kill_me_now); 1518 else 1519 queue_task_work(&m, msg, kill_me_maybe); 1520 1521 } else { 1522 /* 1523 * Handle an MCE which has happened in kernel space but from 1524 * which the kernel can recover: ex_has_fault_handler() has 1525 * already verified that the rIP at which the error happened is 1526 * a rIP from which the kernel can recover (by jumping to 1527 * recovery code specified in _ASM_EXTABLE_FAULT()) and the 1528 * corresponding exception handler which would do that is the 1529 * proper one. 1530 */ 1531 if (m.kflags & MCE_IN_KERNEL_RECOV) { 1532 if (!fixup_exception(regs, X86_TRAP_MC, 0, 0)) 1533 mce_panic("Failed kernel mode recovery", &m, msg); 1534 } 1535 1536 if (m.kflags & MCE_IN_KERNEL_COPYIN) 1537 queue_task_work(&m, msg, kill_me_never); 1538 } 1539 1540 out: 1541 instrumentation_end(); 1542 1543 mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); 1544 } 1545 EXPORT_SYMBOL_GPL(do_machine_check); 1546 1547 #ifndef CONFIG_MEMORY_FAILURE 1548 int memory_failure(unsigned long pfn, int flags) 1549 { 1550 /* mce_severity() should not hand us an ACTION_REQUIRED error */ 1551 BUG_ON(flags & MF_ACTION_REQUIRED); 1552 pr_err("Uncorrected memory error in page 0x%lx ignored\n" 1553 "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n", 1554 pfn); 1555 1556 return 0; 1557 } 1558 #endif 1559 1560 /* 1561 * Periodic polling timer for "silent" machine check errors. If the 1562 * poller finds an MCE, poll 2x faster. When the poller finds no more 1563 * errors, poll 2x slower (up to check_interval seconds). 
1564 */ 1565 static unsigned long check_interval = INITIAL_CHECK_INTERVAL; 1566 1567 static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ 1568 static DEFINE_PER_CPU(struct timer_list, mce_timer); 1569 1570 static unsigned long mce_adjust_timer_default(unsigned long interval) 1571 { 1572 return interval; 1573 } 1574 1575 static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; 1576 1577 static void __start_timer(struct timer_list *t, unsigned long interval) 1578 { 1579 unsigned long when = jiffies + interval; 1580 unsigned long flags; 1581 1582 local_irq_save(flags); 1583 1584 if (!timer_pending(t) || time_before(when, t->expires)) 1585 mod_timer(t, round_jiffies(when)); 1586 1587 local_irq_restore(flags); 1588 } 1589 1590 static void mce_timer_fn(struct timer_list *t) 1591 { 1592 struct timer_list *cpu_t = this_cpu_ptr(&mce_timer); 1593 unsigned long iv; 1594 1595 WARN_ON(cpu_t != t); 1596 1597 iv = __this_cpu_read(mce_next_interval); 1598 1599 if (mce_available(this_cpu_ptr(&cpu_info))) { 1600 machine_check_poll(0, this_cpu_ptr(&mce_poll_banks)); 1601 1602 if (mce_intel_cmci_poll()) { 1603 iv = mce_adjust_timer(iv); 1604 goto done; 1605 } 1606 } 1607 1608 /* 1609 * Alert userspace if needed. If we logged an MCE, reduce the polling 1610 * interval, otherwise increase the polling interval. 1611 */ 1612 if (mce_notify_irq()) 1613 iv = max(iv / 2, (unsigned long) HZ/100); 1614 else 1615 iv = min(iv * 2, round_jiffies_relative(check_interval * HZ)); 1616 1617 done: 1618 __this_cpu_write(mce_next_interval, iv); 1619 __start_timer(t, iv); 1620 } 1621 1622 /* 1623 * Ensure that the timer is firing in @interval from now. 1624 */ 1625 void mce_timer_kick(unsigned long interval) 1626 { 1627 struct timer_list *t = this_cpu_ptr(&mce_timer); 1628 unsigned long iv = __this_cpu_read(mce_next_interval); 1629 1630 __start_timer(t, interval); 1631 1632 if (interval < iv) 1633 __this_cpu_write(mce_next_interval, interval); 1634 } 1635 1636 /* Must not be called in IRQ context where del_timer_sync() can deadlock */ 1637 static void mce_timer_delete_all(void) 1638 { 1639 int cpu; 1640 1641 for_each_online_cpu(cpu) 1642 del_timer_sync(&per_cpu(mce_timer, cpu)); 1643 } 1644 1645 /* 1646 * Notify the user(s) about new machine check events. 1647 * Can be called from interrupt context, but not from machine check/NMI 1648 * context. 1649 */ 1650 int mce_notify_irq(void) 1651 { 1652 /* Not more than two messages every minute */ 1653 static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); 1654 1655 if (test_and_clear_bit(0, &mce_need_notify)) { 1656 mce_work_trigger(); 1657 1658 if (__ratelimit(&ratelimit)) 1659 pr_info(HW_ERR "Machine check events logged\n"); 1660 1661 return 1; 1662 } 1663 return 0; 1664 } 1665 EXPORT_SYMBOL_GPL(mce_notify_irq); 1666 1667 static void __mcheck_cpu_mce_banks_init(void) 1668 { 1669 struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 1670 u8 n_banks = this_cpu_read(mce_num_banks); 1671 int i; 1672 1673 for (i = 0; i < n_banks; i++) { 1674 struct mce_bank *b = &mce_banks[i]; 1675 1676 /* 1677 * Init them all, __mcheck_cpu_apply_quirks() is going to apply 1678 * the required vendor quirks before 1679 * __mcheck_cpu_init_clear_banks() does the final bank setup. 1680 */ 1681 b->ctl = -1ULL; 1682 b->init = true; 1683 } 1684 } 1685 1686 /* 1687 * Initialize Machine Checks for a CPU. 
1688 */ 1689 static void __mcheck_cpu_cap_init(void) 1690 { 1691 u64 cap; 1692 u8 b; 1693 1694 rdmsrl(MSR_IA32_MCG_CAP, cap); 1695 1696 b = cap & MCG_BANKCNT_MASK; 1697 1698 if (b > MAX_NR_BANKS) { 1699 pr_warn("CPU%d: Using only %u machine check banks out of %u\n", 1700 smp_processor_id(), MAX_NR_BANKS, b); 1701 b = MAX_NR_BANKS; 1702 } 1703 1704 this_cpu_write(mce_num_banks, b); 1705 1706 __mcheck_cpu_mce_banks_init(); 1707 1708 /* Use accurate RIP reporting if available. */ 1709 if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9) 1710 mca_cfg.rip_msr = MSR_IA32_MCG_EIP; 1711 1712 if (cap & MCG_SER_P) 1713 mca_cfg.ser = 1; 1714 } 1715 1716 static void __mcheck_cpu_init_generic(void) 1717 { 1718 enum mcp_flags m_fl = 0; 1719 mce_banks_t all_banks; 1720 u64 cap; 1721 1722 if (!mca_cfg.bootlog) 1723 m_fl = MCP_DONTLOG; 1724 1725 /* 1726 * Log the machine checks left over from the previous reset. Log them 1727 * only, do not start processing them. That will happen in mcheck_late_init() 1728 * when all consumers have been registered on the notifier chain. 1729 */ 1730 bitmap_fill(all_banks, MAX_NR_BANKS); 1731 machine_check_poll(MCP_UC | MCP_QUEUE_LOG | m_fl, &all_banks); 1732 1733 cr4_set_bits(X86_CR4_MCE); 1734 1735 rdmsrl(MSR_IA32_MCG_CAP, cap); 1736 if (cap & MCG_CTL_P) 1737 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); 1738 } 1739 1740 static void __mcheck_cpu_init_clear_banks(void) 1741 { 1742 struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 1743 int i; 1744 1745 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { 1746 struct mce_bank *b = &mce_banks[i]; 1747 1748 if (!b->init) 1749 continue; 1750 wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl); 1751 wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); 1752 } 1753 } 1754 1755 /* 1756 * Do a final check to see if there are any unused/RAZ banks. 1757 * 1758 * This must be done after the banks have been initialized and any quirks have 1759 * been applied. 1760 * 1761 * Do not call this from any user-initiated flows, e.g. CPU hotplug or sysfs. 1762 * Otherwise, a user who disables a bank will not be able to re-enable it 1763 * without a system reboot. 1764 */ 1765 static void __mcheck_cpu_check_banks(void) 1766 { 1767 struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 1768 u64 msrval; 1769 int i; 1770 1771 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { 1772 struct mce_bank *b = &mce_banks[i]; 1773 1774 if (!b->init) 1775 continue; 1776 1777 rdmsrl(mca_msr_reg(i, MCA_CTL), msrval); 1778 b->init = !!msrval; 1779 } 1780 } 1781 1782 /* Add per CPU specific workarounds here */ 1783 static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) 1784 { 1785 struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 1786 struct mca_config *cfg = &mca_cfg; 1787 1788 if (c->x86_vendor == X86_VENDOR_UNKNOWN) { 1789 pr_info("unknown CPU type - not enabling MCE support\n"); 1790 return -EOPNOTSUPP; 1791 } 1792 1793 /* This should be disabled by the BIOS, but isn't always */ 1794 if (c->x86_vendor == X86_VENDOR_AMD) { 1795 if (c->x86 == 15 && this_cpu_read(mce_num_banks) > 4) { 1796 /* 1797 * disable GART TBL walk error reporting, which 1798 * trips off incorrectly with the IOMMU & 3ware 1799 * & Cerberus: 1800 */ 1801 clear_bit(10, (unsigned long *)&mce_banks[4].ctl); 1802 } 1803 if (c->x86 < 0x11 && cfg->bootlog < 0) { 1804 /* 1805 * Lots of broken BIOS around that don't clear them 1806 * by default and leave crap in there. Don't log: 1807 */ 1808 cfg->bootlog = 0; 1809 } 1810 /* 1811 * Various K7s with broken bank 0 around. 
Always disable 1812 * by default. 1813 */ 1814 if (c->x86 == 6 && this_cpu_read(mce_num_banks) > 0) 1815 mce_banks[0].ctl = 0; 1816 1817 /* 1818 * overflow_recov is supported for F15h Models 00h-0fh 1819 * even though we don't have a CPUID bit for it. 1820 */ 1821 if (c->x86 == 0x15 && c->x86_model <= 0xf) 1822 mce_flags.overflow_recov = 1; 1823 1824 } 1825 1826 if (c->x86_vendor == X86_VENDOR_INTEL) { 1827 /* 1828 * SDM documents that on family 6 bank 0 should not be written 1829 * because it aliases to another special BIOS controlled 1830 * register. 1831 * But it's not aliased anymore on model 0x1a+ 1832 * Don't ignore bank 0 completely because there could be a 1833 * valid event later, merely don't write CTL0. 1834 */ 1835 1836 if (c->x86 == 6 && c->x86_model < 0x1A && this_cpu_read(mce_num_banks) > 0) 1837 mce_banks[0].init = false; 1838 1839 /* 1840 * All newer Intel systems support MCE broadcasting. Enable 1841 * synchronization with a one second timeout. 1842 */ 1843 if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) && 1844 cfg->monarch_timeout < 0) 1845 cfg->monarch_timeout = USEC_PER_SEC; 1846 1847 /* 1848 * There are also broken BIOSes on some Pentium M and 1849 * earlier systems: 1850 */ 1851 if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0) 1852 cfg->bootlog = 0; 1853 1854 if (c->x86 == 6 && c->x86_model == 45) 1855 mce_flags.snb_ifu_quirk = 1; 1856 } 1857 1858 if (c->x86_vendor == X86_VENDOR_ZHAOXIN) { 1859 /* 1860 * All newer Zhaoxin CPUs support MCE broadcasting. Enable 1861 * synchronization with a one second timeout. 1862 */ 1863 if (c->x86 > 6 || (c->x86_model == 0x19 || c->x86_model == 0x1f)) { 1864 if (cfg->monarch_timeout < 0) 1865 cfg->monarch_timeout = USEC_PER_SEC; 1866 } 1867 } 1868 1869 if (cfg->monarch_timeout < 0) 1870 cfg->monarch_timeout = 0; 1871 if (cfg->bootlog != 0) 1872 cfg->panic_timeout = 30; 1873 1874 return 0; 1875 } 1876 1877 static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c) 1878 { 1879 if (c->x86 != 5) 1880 return 0; 1881 1882 switch (c->x86_vendor) { 1883 case X86_VENDOR_INTEL: 1884 intel_p5_mcheck_init(c); 1885 mce_flags.p5 = 1; 1886 return 1; 1887 case X86_VENDOR_CENTAUR: 1888 winchip_mcheck_init(c); 1889 mce_flags.winchip = 1; 1890 return 1; 1891 default: 1892 return 0; 1893 } 1894 1895 return 0; 1896 } 1897 1898 /* 1899 * Init basic CPU features needed for early decoding of MCEs. 1900 */ 1901 static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c) 1902 { 1903 if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) { 1904 mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV); 1905 mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR); 1906 mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA); 1907 mce_flags.amd_threshold = 1; 1908 } 1909 } 1910 1911 static void mce_centaur_feature_init(struct cpuinfo_x86 *c) 1912 { 1913 struct mca_config *cfg = &mca_cfg; 1914 1915 /* 1916 * All newer Centaur CPUs support MCE broadcasting. Enable 1917 * synchronization with a one second timeout. 1918 */ 1919 if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) || 1920 c->x86 > 6) { 1921 if (cfg->monarch_timeout < 0) 1922 cfg->monarch_timeout = USEC_PER_SEC; 1923 } 1924 } 1925 1926 static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c) 1927 { 1928 struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 1929 1930 /* 1931 * These CPUs have MCA bank 8 which reports only one error type called 1932 * SVAD (System View Address Decoder). 
The reporting of that error is 1933 * controlled by IA32_MC8.CTL.0. 1934 * 1935 * If enabled, prefetching on these CPUs will cause SVAD MCE when 1936 * virtual machines start and result in a system panic. Always disable 1937 * bank 8 SVAD error by default. 1938 */ 1939 if ((c->x86 == 7 && c->x86_model == 0x1b) || 1940 (c->x86_model == 0x19 || c->x86_model == 0x1f)) { 1941 if (this_cpu_read(mce_num_banks) > 8) 1942 mce_banks[8].ctl = 0; 1943 } 1944 1945 intel_init_cmci(); 1946 intel_init_lmce(); 1947 mce_adjust_timer = cmci_intel_adjust_timer; 1948 } 1949 1950 static void mce_zhaoxin_feature_clear(struct cpuinfo_x86 *c) 1951 { 1952 intel_clear_lmce(); 1953 } 1954 1955 static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) 1956 { 1957 switch (c->x86_vendor) { 1958 case X86_VENDOR_INTEL: 1959 mce_intel_feature_init(c); 1960 mce_adjust_timer = cmci_intel_adjust_timer; 1961 break; 1962 1963 case X86_VENDOR_AMD: { 1964 mce_amd_feature_init(c); 1965 break; 1966 } 1967 1968 case X86_VENDOR_HYGON: 1969 mce_hygon_feature_init(c); 1970 break; 1971 1972 case X86_VENDOR_CENTAUR: 1973 mce_centaur_feature_init(c); 1974 break; 1975 1976 case X86_VENDOR_ZHAOXIN: 1977 mce_zhaoxin_feature_init(c); 1978 break; 1979 1980 default: 1981 break; 1982 } 1983 } 1984 1985 static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c) 1986 { 1987 switch (c->x86_vendor) { 1988 case X86_VENDOR_INTEL: 1989 mce_intel_feature_clear(c); 1990 break; 1991 1992 case X86_VENDOR_ZHAOXIN: 1993 mce_zhaoxin_feature_clear(c); 1994 break; 1995 1996 default: 1997 break; 1998 } 1999 } 2000 2001 static void mce_start_timer(struct timer_list *t) 2002 { 2003 unsigned long iv = check_interval * HZ; 2004 2005 if (mca_cfg.ignore_ce || !iv) 2006 return; 2007 2008 this_cpu_write(mce_next_interval, iv); 2009 __start_timer(t, iv); 2010 } 2011 2012 static void __mcheck_cpu_setup_timer(void) 2013 { 2014 struct timer_list *t = this_cpu_ptr(&mce_timer); 2015 2016 timer_setup(t, mce_timer_fn, TIMER_PINNED); 2017 } 2018 2019 static void __mcheck_cpu_init_timer(void) 2020 { 2021 struct timer_list *t = this_cpu_ptr(&mce_timer); 2022 2023 timer_setup(t, mce_timer_fn, TIMER_PINNED); 2024 mce_start_timer(t); 2025 } 2026 2027 bool filter_mce(struct mce *m) 2028 { 2029 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) 2030 return amd_filter_mce(m); 2031 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) 2032 return intel_filter_mce(m); 2033 2034 return false; 2035 } 2036 2037 static __always_inline void exc_machine_check_kernel(struct pt_regs *regs) 2038 { 2039 irqentry_state_t irq_state; 2040 2041 WARN_ON_ONCE(user_mode(regs)); 2042 2043 /* 2044 * Only required when from kernel mode. See 2045 * mce_check_crashing_cpu() for details. 2046 */ 2047 if (mca_cfg.initialized && mce_check_crashing_cpu()) 2048 return; 2049 2050 irq_state = irqentry_nmi_enter(regs); 2051 2052 do_machine_check(regs); 2053 2054 irqentry_nmi_exit(regs, irq_state); 2055 } 2056 2057 static __always_inline void exc_machine_check_user(struct pt_regs *regs) 2058 { 2059 irqentry_enter_from_user_mode(regs); 2060 2061 do_machine_check(regs); 2062 2063 irqentry_exit_to_user_mode(regs); 2064 } 2065 2066 #ifdef CONFIG_X86_64 2067 /* MCE hit kernel mode */ 2068 DEFINE_IDTENTRY_MCE(exc_machine_check) 2069 { 2070 unsigned long dr7; 2071 2072 dr7 = local_db_save(); 2073 exc_machine_check_kernel(regs); 2074 local_db_restore(dr7); 2075 } 2076 2077 /* The user mode variant. 
*/ 2078 DEFINE_IDTENTRY_MCE_USER(exc_machine_check) 2079 { 2080 unsigned long dr7; 2081 2082 dr7 = local_db_save(); 2083 exc_machine_check_user(regs); 2084 local_db_restore(dr7); 2085 } 2086 #else 2087 /* 32bit unified entry point */ 2088 DEFINE_IDTENTRY_RAW(exc_machine_check) 2089 { 2090 unsigned long dr7; 2091 2092 dr7 = local_db_save(); 2093 if (user_mode(regs)) 2094 exc_machine_check_user(regs); 2095 else 2096 exc_machine_check_kernel(regs); 2097 local_db_restore(dr7); 2098 } 2099 #endif 2100 2101 /* 2102 * Called for each booted CPU to set up machine checks. 2103 * Must be called with preempt off: 2104 */ 2105 void mcheck_cpu_init(struct cpuinfo_x86 *c) 2106 { 2107 if (mca_cfg.disabled) 2108 return; 2109 2110 if (__mcheck_cpu_ancient_init(c)) 2111 return; 2112 2113 if (!mce_available(c)) 2114 return; 2115 2116 __mcheck_cpu_cap_init(); 2117 2118 if (__mcheck_cpu_apply_quirks(c) < 0) { 2119 mca_cfg.disabled = 1; 2120 return; 2121 } 2122 2123 if (mce_gen_pool_init()) { 2124 mca_cfg.disabled = 1; 2125 pr_emerg("Couldn't allocate MCE records pool!\n"); 2126 return; 2127 } 2128 2129 mca_cfg.initialized = 1; 2130 2131 __mcheck_cpu_init_early(c); 2132 __mcheck_cpu_init_generic(); 2133 __mcheck_cpu_init_vendor(c); 2134 __mcheck_cpu_init_clear_banks(); 2135 __mcheck_cpu_check_banks(); 2136 __mcheck_cpu_setup_timer(); 2137 } 2138 2139 /* 2140 * Called for each booted CPU to clear some machine checks opt-ins 2141 */ 2142 void mcheck_cpu_clear(struct cpuinfo_x86 *c) 2143 { 2144 if (mca_cfg.disabled) 2145 return; 2146 2147 if (!mce_available(c)) 2148 return; 2149 2150 /* 2151 * Possibly to clear general settings generic to x86 2152 * __mcheck_cpu_clear_generic(c); 2153 */ 2154 __mcheck_cpu_clear_vendor(c); 2155 2156 } 2157 2158 static void __mce_disable_bank(void *arg) 2159 { 2160 int bank = *((int *)arg); 2161 __clear_bit(bank, this_cpu_ptr(mce_poll_banks)); 2162 cmci_disable_bank(bank); 2163 } 2164 2165 void mce_disable_bank(int bank) 2166 { 2167 if (bank >= this_cpu_read(mce_num_banks)) { 2168 pr_warn(FW_BUG 2169 "Ignoring request to disable invalid MCA bank %d.\n", 2170 bank); 2171 return; 2172 } 2173 set_bit(bank, mce_banks_ce_disabled); 2174 on_each_cpu(__mce_disable_bank, &bank, 1); 2175 } 2176 2177 /* 2178 * mce=off Disables machine check 2179 * mce=no_cmci Disables CMCI 2180 * mce=no_lmce Disables LMCE 2181 * mce=dont_log_ce Clears corrected events silently, no log created for CEs. 2182 * mce=print_all Print all machine check logs to console 2183 * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared. 2184 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above) 2185 * monarchtimeout is how long to wait for other CPUs on machine 2186 * check, or 0 to not wait 2187 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD Fam10h 2188 and older. 2189 * mce=nobootlog Don't log MCEs from before booting. 
2190 * mce=bios_cmci_threshold Don't program the CMCI threshold 2191 * mce=recovery force enable copy_mc_fragile() 2192 */ 2193 static int __init mcheck_enable(char *str) 2194 { 2195 struct mca_config *cfg = &mca_cfg; 2196 2197 if (*str == 0) { 2198 enable_p5_mce(); 2199 return 1; 2200 } 2201 if (*str == '=') 2202 str++; 2203 if (!strcmp(str, "off")) 2204 cfg->disabled = 1; 2205 else if (!strcmp(str, "no_cmci")) 2206 cfg->cmci_disabled = true; 2207 else if (!strcmp(str, "no_lmce")) 2208 cfg->lmce_disabled = 1; 2209 else if (!strcmp(str, "dont_log_ce")) 2210 cfg->dont_log_ce = true; 2211 else if (!strcmp(str, "print_all")) 2212 cfg->print_all = true; 2213 else if (!strcmp(str, "ignore_ce")) 2214 cfg->ignore_ce = true; 2215 else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog")) 2216 cfg->bootlog = (str[0] == 'b'); 2217 else if (!strcmp(str, "bios_cmci_threshold")) 2218 cfg->bios_cmci_threshold = 1; 2219 else if (!strcmp(str, "recovery")) 2220 cfg->recovery = 1; 2221 else if (isdigit(str[0])) { 2222 if (get_option(&str, &cfg->tolerant) == 2) 2223 get_option(&str, &(cfg->monarch_timeout)); 2224 } else { 2225 pr_info("mce argument %s ignored. Please use /sys\n", str); 2226 return 0; 2227 } 2228 return 1; 2229 } 2230 __setup("mce", mcheck_enable); 2231 2232 int __init mcheck_init(void) 2233 { 2234 mce_register_decode_chain(&early_nb); 2235 mce_register_decode_chain(&mce_uc_nb); 2236 mce_register_decode_chain(&mce_default_nb); 2237 2238 INIT_WORK(&mce_work, mce_gen_pool_process); 2239 init_irq_work(&mce_irq_work, mce_irq_work_cb); 2240 2241 return 0; 2242 } 2243 2244 /* 2245 * mce_syscore: PM support 2246 */ 2247 2248 /* 2249 * Disable machine checks on suspend and shutdown. We can't really handle 2250 * them later. 2251 */ 2252 static void mce_disable_error_reporting(void) 2253 { 2254 struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 2255 int i; 2256 2257 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { 2258 struct mce_bank *b = &mce_banks[i]; 2259 2260 if (b->init) 2261 wrmsrl(mca_msr_reg(i, MCA_CTL), 0); 2262 } 2263 return; 2264 } 2265 2266 static void vendor_disable_error_reporting(void) 2267 { 2268 /* 2269 * Don't clear on Intel or AMD or Hygon or Zhaoxin CPUs. Some of these 2270 * MSRs are socket-wide. Disabling them for just a single offlined CPU 2271 * is bad, since it will inhibit reporting for all shared resources on 2272 * the socket like the last level cache (LLC), the integrated memory 2273 * controller (iMC), etc. 2274 */ 2275 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL || 2276 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON || 2277 boot_cpu_data.x86_vendor == X86_VENDOR_AMD || 2278 boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) 2279 return; 2280 2281 mce_disable_error_reporting(); 2282 } 2283 2284 static int mce_syscore_suspend(void) 2285 { 2286 vendor_disable_error_reporting(); 2287 return 0; 2288 } 2289 2290 static void mce_syscore_shutdown(void) 2291 { 2292 vendor_disable_error_reporting(); 2293 } 2294 2295 /* 2296 * On resume clear all MCE state. Don't want to see leftovers from the BIOS. 
2297 * Only one CPU is active at this time, the others get re-added later using 2298 * CPU hotplug: 2299 */ 2300 static void mce_syscore_resume(void) 2301 { 2302 __mcheck_cpu_init_generic(); 2303 __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info)); 2304 __mcheck_cpu_init_clear_banks(); 2305 } 2306 2307 static struct syscore_ops mce_syscore_ops = { 2308 .suspend = mce_syscore_suspend, 2309 .shutdown = mce_syscore_shutdown, 2310 .resume = mce_syscore_resume, 2311 }; 2312 2313 /* 2314 * mce_device: Sysfs support 2315 */ 2316 2317 static void mce_cpu_restart(void *data) 2318 { 2319 if (!mce_available(raw_cpu_ptr(&cpu_info))) 2320 return; 2321 __mcheck_cpu_init_generic(); 2322 __mcheck_cpu_init_clear_banks(); 2323 __mcheck_cpu_init_timer(); 2324 } 2325 2326 /* Reinit MCEs after user configuration changes */ 2327 static void mce_restart(void) 2328 { 2329 mce_timer_delete_all(); 2330 on_each_cpu(mce_cpu_restart, NULL, 1); 2331 } 2332 2333 /* Toggle features for corrected errors */ 2334 static void mce_disable_cmci(void *data) 2335 { 2336 if (!mce_available(raw_cpu_ptr(&cpu_info))) 2337 return; 2338 cmci_clear(); 2339 } 2340 2341 static void mce_enable_ce(void *all) 2342 { 2343 if (!mce_available(raw_cpu_ptr(&cpu_info))) 2344 return; 2345 cmci_reenable(); 2346 cmci_recheck(); 2347 if (all) 2348 __mcheck_cpu_init_timer(); 2349 } 2350 2351 static struct bus_type mce_subsys = { 2352 .name = "machinecheck", 2353 .dev_name = "machinecheck", 2354 }; 2355 2356 DEFINE_PER_CPU(struct device *, mce_device); 2357 2358 static inline struct mce_bank_dev *attr_to_bank(struct device_attribute *attr) 2359 { 2360 return container_of(attr, struct mce_bank_dev, attr); 2361 } 2362 2363 static ssize_t show_bank(struct device *s, struct device_attribute *attr, 2364 char *buf) 2365 { 2366 u8 bank = attr_to_bank(attr)->bank; 2367 struct mce_bank *b; 2368 2369 if (bank >= per_cpu(mce_num_banks, s->id)) 2370 return -EINVAL; 2371 2372 b = &per_cpu(mce_banks_array, s->id)[bank]; 2373 2374 if (!b->init) 2375 return -ENODEV; 2376 2377 return sprintf(buf, "%llx\n", b->ctl); 2378 } 2379 2380 static ssize_t set_bank(struct device *s, struct device_attribute *attr, 2381 const char *buf, size_t size) 2382 { 2383 u8 bank = attr_to_bank(attr)->bank; 2384 struct mce_bank *b; 2385 u64 new; 2386 2387 if (kstrtou64(buf, 0, &new) < 0) 2388 return -EINVAL; 2389 2390 if (bank >= per_cpu(mce_num_banks, s->id)) 2391 return -EINVAL; 2392 2393 b = &per_cpu(mce_banks_array, s->id)[bank]; 2394 2395 if (!b->init) 2396 return -ENODEV; 2397 2398 b->ctl = new; 2399 mce_restart(); 2400 2401 return size; 2402 } 2403 2404 static ssize_t set_ignore_ce(struct device *s, 2405 struct device_attribute *attr, 2406 const char *buf, size_t size) 2407 { 2408 u64 new; 2409 2410 if (kstrtou64(buf, 0, &new) < 0) 2411 return -EINVAL; 2412 2413 mutex_lock(&mce_sysfs_mutex); 2414 if (mca_cfg.ignore_ce ^ !!new) { 2415 if (new) { 2416 /* disable ce features */ 2417 mce_timer_delete_all(); 2418 on_each_cpu(mce_disable_cmci, NULL, 1); 2419 mca_cfg.ignore_ce = true; 2420 } else { 2421 /* enable ce features */ 2422 mca_cfg.ignore_ce = false; 2423 on_each_cpu(mce_enable_ce, (void *)1, 1); 2424 } 2425 } 2426 mutex_unlock(&mce_sysfs_mutex); 2427 2428 return size; 2429 } 2430 2431 static ssize_t set_cmci_disabled(struct device *s, 2432 struct device_attribute *attr, 2433 const char *buf, size_t size) 2434 { 2435 u64 new; 2436 2437 if (kstrtou64(buf, 0, &new) < 0) 2438 return -EINVAL; 2439 2440 mutex_lock(&mce_sysfs_mutex); 2441 if (mca_cfg.cmci_disabled ^ !!new) { 2442 if (new) { 
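			/*
			 * Descriptive note: the mca_cfg.cmci_disabled ^ !!new
			 * test above means work is only done when the
			 * requested state actually differs from the current
			 * one.  Disabling tears down CMCI on all CPUs before
			 * the flag is set; the enable branch below clears the
			 * flag first and then re-arms CMCI via mce_enable_ce().
			 */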
2443 /* disable cmci */ 2444 on_each_cpu(mce_disable_cmci, NULL, 1); 2445 mca_cfg.cmci_disabled = true; 2446 } else { 2447 /* enable cmci */ 2448 mca_cfg.cmci_disabled = false; 2449 on_each_cpu(mce_enable_ce, NULL, 1); 2450 } 2451 } 2452 mutex_unlock(&mce_sysfs_mutex); 2453 2454 return size; 2455 } 2456 2457 static ssize_t store_int_with_restart(struct device *s, 2458 struct device_attribute *attr, 2459 const char *buf, size_t size) 2460 { 2461 unsigned long old_check_interval = check_interval; 2462 ssize_t ret = device_store_ulong(s, attr, buf, size); 2463 2464 if (check_interval == old_check_interval) 2465 return ret; 2466 2467 mutex_lock(&mce_sysfs_mutex); 2468 mce_restart(); 2469 mutex_unlock(&mce_sysfs_mutex); 2470 2471 return ret; 2472 } 2473 2474 static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant); 2475 static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout); 2476 static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce); 2477 static DEVICE_BOOL_ATTR(print_all, 0644, mca_cfg.print_all); 2478 2479 static struct dev_ext_attribute dev_attr_check_interval = { 2480 __ATTR(check_interval, 0644, device_show_int, store_int_with_restart), 2481 &check_interval 2482 }; 2483 2484 static struct dev_ext_attribute dev_attr_ignore_ce = { 2485 __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce), 2486 &mca_cfg.ignore_ce 2487 }; 2488 2489 static struct dev_ext_attribute dev_attr_cmci_disabled = { 2490 __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled), 2491 &mca_cfg.cmci_disabled 2492 }; 2493 2494 static struct device_attribute *mce_device_attrs[] = { 2495 &dev_attr_tolerant.attr, 2496 &dev_attr_check_interval.attr, 2497 #ifdef CONFIG_X86_MCELOG_LEGACY 2498 &dev_attr_trigger, 2499 #endif 2500 &dev_attr_monarch_timeout.attr, 2501 &dev_attr_dont_log_ce.attr, 2502 &dev_attr_print_all.attr, 2503 &dev_attr_ignore_ce.attr, 2504 &dev_attr_cmci_disabled.attr, 2505 NULL 2506 }; 2507 2508 static cpumask_var_t mce_device_initialized; 2509 2510 static void mce_device_release(struct device *dev) 2511 { 2512 kfree(dev); 2513 } 2514 2515 /* Per CPU device init. 
All of the CPUs still share the same bank device: */ 2516 static int mce_device_create(unsigned int cpu) 2517 { 2518 struct device *dev; 2519 int err; 2520 int i, j; 2521 2522 if (!mce_available(&boot_cpu_data)) 2523 return -EIO; 2524 2525 dev = per_cpu(mce_device, cpu); 2526 if (dev) 2527 return 0; 2528 2529 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2530 if (!dev) 2531 return -ENOMEM; 2532 dev->id = cpu; 2533 dev->bus = &mce_subsys; 2534 dev->release = &mce_device_release; 2535 2536 err = device_register(dev); 2537 if (err) { 2538 put_device(dev); 2539 return err; 2540 } 2541 2542 for (i = 0; mce_device_attrs[i]; i++) { 2543 err = device_create_file(dev, mce_device_attrs[i]); 2544 if (err) 2545 goto error; 2546 } 2547 for (j = 0; j < per_cpu(mce_num_banks, cpu); j++) { 2548 err = device_create_file(dev, &mce_bank_devs[j].attr); 2549 if (err) 2550 goto error2; 2551 } 2552 cpumask_set_cpu(cpu, mce_device_initialized); 2553 per_cpu(mce_device, cpu) = dev; 2554 2555 return 0; 2556 error2: 2557 while (--j >= 0) 2558 device_remove_file(dev, &mce_bank_devs[j].attr); 2559 error: 2560 while (--i >= 0) 2561 device_remove_file(dev, mce_device_attrs[i]); 2562 2563 device_unregister(dev); 2564 2565 return err; 2566 } 2567 2568 static void mce_device_remove(unsigned int cpu) 2569 { 2570 struct device *dev = per_cpu(mce_device, cpu); 2571 int i; 2572 2573 if (!cpumask_test_cpu(cpu, mce_device_initialized)) 2574 return; 2575 2576 for (i = 0; mce_device_attrs[i]; i++) 2577 device_remove_file(dev, mce_device_attrs[i]); 2578 2579 for (i = 0; i < per_cpu(mce_num_banks, cpu); i++) 2580 device_remove_file(dev, &mce_bank_devs[i].attr); 2581 2582 device_unregister(dev); 2583 cpumask_clear_cpu(cpu, mce_device_initialized); 2584 per_cpu(mce_device, cpu) = NULL; 2585 } 2586 2587 /* Make sure there are no machine checks on offlined CPUs. 
*/ 2588 static void mce_disable_cpu(void) 2589 { 2590 if (!mce_available(raw_cpu_ptr(&cpu_info))) 2591 return; 2592 2593 if (!cpuhp_tasks_frozen) 2594 cmci_clear(); 2595 2596 vendor_disable_error_reporting(); 2597 } 2598 2599 static void mce_reenable_cpu(void) 2600 { 2601 struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 2602 int i; 2603 2604 if (!mce_available(raw_cpu_ptr(&cpu_info))) 2605 return; 2606 2607 if (!cpuhp_tasks_frozen) 2608 cmci_reenable(); 2609 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { 2610 struct mce_bank *b = &mce_banks[i]; 2611 2612 if (b->init) 2613 wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl); 2614 } 2615 } 2616 2617 static int mce_cpu_dead(unsigned int cpu) 2618 { 2619 mce_intel_hcpu_update(cpu); 2620 2621 /* intentionally ignoring frozen here */ 2622 if (!cpuhp_tasks_frozen) 2623 cmci_rediscover(); 2624 return 0; 2625 } 2626 2627 static int mce_cpu_online(unsigned int cpu) 2628 { 2629 struct timer_list *t = this_cpu_ptr(&mce_timer); 2630 int ret; 2631 2632 mce_device_create(cpu); 2633 2634 ret = mce_threshold_create_device(cpu); 2635 if (ret) { 2636 mce_device_remove(cpu); 2637 return ret; 2638 } 2639 mce_reenable_cpu(); 2640 mce_start_timer(t); 2641 return 0; 2642 } 2643 2644 static int mce_cpu_pre_down(unsigned int cpu) 2645 { 2646 struct timer_list *t = this_cpu_ptr(&mce_timer); 2647 2648 mce_disable_cpu(); 2649 del_timer_sync(t); 2650 mce_threshold_remove_device(cpu); 2651 mce_device_remove(cpu); 2652 return 0; 2653 } 2654 2655 static __init void mce_init_banks(void) 2656 { 2657 int i; 2658 2659 for (i = 0; i < MAX_NR_BANKS; i++) { 2660 struct mce_bank_dev *b = &mce_bank_devs[i]; 2661 struct device_attribute *a = &b->attr; 2662 2663 b->bank = i; 2664 2665 sysfs_attr_init(&a->attr); 2666 a->attr.name = b->attrname; 2667 snprintf(b->attrname, ATTR_LEN, "bank%d", i); 2668 2669 a->attr.mode = 0644; 2670 a->show = show_bank; 2671 a->store = set_bank; 2672 } 2673 } 2674 2675 /* 2676 * When running on XEN, this initcall is ordered against the XEN mcelog 2677 * initcall: 2678 * 2679 * device_initcall(xen_late_init_mcelog); 2680 * device_initcall_sync(mcheck_init_device); 2681 */ 2682 static __init int mcheck_init_device(void) 2683 { 2684 int err; 2685 2686 /* 2687 * Check if we have a spare virtual bit. This will only become 2688 * a problem if/when we move beyond 5-level page tables. 2689 */ 2690 MAYBE_BUILD_BUG_ON(__VIRTUAL_MASK_SHIFT >= 63); 2691 2692 if (!mce_available(&boot_cpu_data)) { 2693 err = -EIO; 2694 goto err_out; 2695 } 2696 2697 if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) { 2698 err = -ENOMEM; 2699 goto err_out; 2700 } 2701 2702 mce_init_banks(); 2703 2704 err = subsys_system_register(&mce_subsys, NULL); 2705 if (err) 2706 goto err_out_mem; 2707 2708 err = cpuhp_setup_state(CPUHP_X86_MCE_DEAD, "x86/mce:dead", NULL, 2709 mce_cpu_dead); 2710 if (err) 2711 goto err_out_mem; 2712 2713 /* 2714 * Invokes mce_cpu_online() on all CPUs which are online when 2715 * the state is installed. 
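	 * mce_cpu_pre_down() is the matching teardown callback and runs on
	 * each CPU as it goes offline.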
2716 */ 2717 err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online", 2718 mce_cpu_online, mce_cpu_pre_down); 2719 if (err < 0) 2720 goto err_out_online; 2721 2722 register_syscore_ops(&mce_syscore_ops); 2723 2724 return 0; 2725 2726 err_out_online: 2727 cpuhp_remove_state(CPUHP_X86_MCE_DEAD); 2728 2729 err_out_mem: 2730 free_cpumask_var(mce_device_initialized); 2731 2732 err_out: 2733 pr_err("Unable to init MCE device (rc: %d)\n", err); 2734 2735 return err; 2736 } 2737 device_initcall_sync(mcheck_init_device); 2738 2739 /* 2740 * Old style boot options parsing. Only for compatibility. 2741 */ 2742 static int __init mcheck_disable(char *str) 2743 { 2744 mca_cfg.disabled = 1; 2745 return 1; 2746 } 2747 __setup("nomce", mcheck_disable); 2748 2749 #ifdef CONFIG_DEBUG_FS 2750 struct dentry *mce_get_debugfs_dir(void) 2751 { 2752 static struct dentry *dmce; 2753 2754 if (!dmce) 2755 dmce = debugfs_create_dir("mce", NULL); 2756 2757 return dmce; 2758 } 2759 2760 static void mce_reset(void) 2761 { 2762 atomic_set(&mce_fake_panicked, 0); 2763 atomic_set(&mce_executing, 0); 2764 atomic_set(&mce_callin, 0); 2765 atomic_set(&global_nwo, 0); 2766 cpumask_setall(&mce_missing_cpus); 2767 } 2768 2769 static int fake_panic_get(void *data, u64 *val) 2770 { 2771 *val = fake_panic; 2772 return 0; 2773 } 2774 2775 static int fake_panic_set(void *data, u64 val) 2776 { 2777 mce_reset(); 2778 fake_panic = val; 2779 return 0; 2780 } 2781 2782 DEFINE_DEBUGFS_ATTRIBUTE(fake_panic_fops, fake_panic_get, fake_panic_set, 2783 "%llu\n"); 2784 2785 static void __init mcheck_debugfs_init(void) 2786 { 2787 struct dentry *dmce; 2788 2789 dmce = mce_get_debugfs_dir(); 2790 debugfs_create_file_unsafe("fake_panic", 0444, dmce, NULL, 2791 &fake_panic_fops); 2792 } 2793 #else 2794 static void __init mcheck_debugfs_init(void) { } 2795 #endif 2796 2797 static int __init mcheck_late_init(void) 2798 { 2799 if (mca_cfg.recovery) 2800 enable_copy_mc_fragile(); 2801 2802 mcheck_debugfs_init(); 2803 2804 /* 2805 * Flush out everything that has been logged during early boot, now that 2806 * everything has been initialized (workqueues, decoders, ...). 2807 */ 2808 mce_schedule_work(); 2809 2810 return 0; 2811 } 2812 late_initcall(mcheck_late_init); 2813