1 /* 2 * (c) 2005-2016 Advanced Micro Devices, Inc. 3 * Your use of this code is subject to the terms and conditions of the 4 * GNU general public license version 2. See "COPYING" or 5 * http://www.gnu.org/licenses/gpl.html 6 * 7 * Written by Jacob Shin - AMD, Inc. 8 * Maintained by: Borislav Petkov <bp@alien8.de> 9 * 10 * All MC4_MISCi registers are shared between cores on a node. 11 */ 12 #include <linux/interrupt.h> 13 #include <linux/notifier.h> 14 #include <linux/kobject.h> 15 #include <linux/percpu.h> 16 #include <linux/errno.h> 17 #include <linux/sched.h> 18 #include <linux/sysfs.h> 19 #include <linux/slab.h> 20 #include <linux/init.h> 21 #include <linux/cpu.h> 22 #include <linux/smp.h> 23 #include <linux/string.h> 24 25 #include <asm/amd_nb.h> 26 #include <asm/traps.h> 27 #include <asm/apic.h> 28 #include <asm/mce.h> 29 #include <asm/msr.h> 30 #include <asm/trace/irq_vectors.h> 31 32 #include "internal.h" 33 34 #define NR_BLOCKS 5 35 #define THRESHOLD_MAX 0xFFF 36 #define INT_TYPE_APIC 0x00020000 37 #define MASK_VALID_HI 0x80000000 38 #define MASK_CNTP_HI 0x40000000 39 #define MASK_LOCKED_HI 0x20000000 40 #define MASK_LVTOFF_HI 0x00F00000 41 #define MASK_COUNT_EN_HI 0x00080000 42 #define MASK_INT_TYPE_HI 0x00060000 43 #define MASK_OVERFLOW_HI 0x00010000 44 #define MASK_ERR_COUNT_HI 0x00000FFF 45 #define MASK_BLKPTR_LO 0xFF000000 46 #define MCG_XBLK_ADDR 0xC0000400 47 48 /* Deferred error settings */ 49 #define MSR_CU_DEF_ERR 0xC0000410 50 #define MASK_DEF_LVTOFF 0x000000F0 51 #define MASK_DEF_INT_TYPE 0x00000006 52 #define DEF_LVT_OFF 0x2 53 #define DEF_INT_TYPE_APIC 0x2 54 55 /* Scalable MCA: */ 56 57 /* Threshold LVT offset is at MSR0xC0000410[15:12] */ 58 #define SMCA_THR_LVT_OFF 0xF000 59 60 static bool thresholding_irq_en; 61 62 static const char * const th_names[] = { 63 "load_store", 64 "insn_fetch", 65 "combined_unit", 66 "decode_unit", 67 "northbridge", 68 "execution_unit", 69 }; 70 71 static const char * const smca_umc_block_names[] = { 72 "dram_ecc", 73 "misc_umc" 74 }; 75 76 struct smca_bank_name { 77 const char *name; /* Short name for sysfs */ 78 const char *long_name; /* Long name for pretty-printing */ 79 }; 80 81 static struct smca_bank_name smca_names[] = { 82 [SMCA_LS] = { "load_store", "Load Store Unit" }, 83 [SMCA_IF] = { "insn_fetch", "Instruction Fetch Unit" }, 84 [SMCA_L2_CACHE] = { "l2_cache", "L2 Cache" }, 85 [SMCA_DE] = { "decode_unit", "Decode Unit" }, 86 [SMCA_RESERVED] = { "reserved", "Reserved" }, 87 [SMCA_EX] = { "execution_unit", "Execution Unit" }, 88 [SMCA_FP] = { "floating_point", "Floating Point Unit" }, 89 [SMCA_L3_CACHE] = { "l3_cache", "L3 Cache" }, 90 [SMCA_CS] = { "coherent_slave", "Coherent Slave" }, 91 [SMCA_PIE] = { "pie", "Power, Interrupts, etc." }, 92 [SMCA_UMC] = { "umc", "Unified Memory Controller" }, 93 [SMCA_PB] = { "param_block", "Parameter Block" }, 94 [SMCA_PSP] = { "psp", "Platform Security Processor" }, 95 [SMCA_SMU] = { "smu", "System Management Unit" }, 96 }; 97 98 static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init = 99 { 100 [0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 } 101 }; 102 103 static const char *smca_get_name(enum smca_bank_types t) 104 { 105 if (t >= N_SMCA_BANK_TYPES) 106 return NULL; 107 108 return smca_names[t].name; 109 } 110 111 const char *smca_get_long_name(enum smca_bank_types t) 112 { 113 if (t >= N_SMCA_BANK_TYPES) 114 return NULL; 115 116 return smca_names[t].long_name; 117 } 118 EXPORT_SYMBOL_GPL(smca_get_long_name); 119 120 static enum smca_bank_types smca_get_bank_type(unsigned int bank) 121 { 122 struct smca_bank *b; 123 124 if (bank >= MAX_NR_BANKS) 125 return N_SMCA_BANK_TYPES; 126 127 b = &smca_banks[bank]; 128 if (!b->hwid) 129 return N_SMCA_BANK_TYPES; 130 131 return b->hwid->bank_type; 132 } 133 134 static struct smca_hwid smca_hwid_mcatypes[] = { 135 /* { bank_type, hwid_mcatype, xec_bitmap } */ 136 137 /* Reserved type */ 138 { SMCA_RESERVED, HWID_MCATYPE(0x00, 0x0), 0x0 }, 139 140 /* ZN Core (HWID=0xB0) MCA types */ 141 { SMCA_LS, HWID_MCATYPE(0xB0, 0x0), 0x1FFFEF }, 142 { SMCA_IF, HWID_MCATYPE(0xB0, 0x1), 0x3FFF }, 143 { SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2), 0xF }, 144 { SMCA_DE, HWID_MCATYPE(0xB0, 0x3), 0x1FF }, 145 /* HWID 0xB0 MCATYPE 0x4 is Reserved */ 146 { SMCA_EX, HWID_MCATYPE(0xB0, 0x5), 0x7FF }, 147 { SMCA_FP, HWID_MCATYPE(0xB0, 0x6), 0x7F }, 148 { SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7), 0xFF }, 149 150 /* Data Fabric MCA types */ 151 { SMCA_CS, HWID_MCATYPE(0x2E, 0x0), 0x1FF }, 152 { SMCA_PIE, HWID_MCATYPE(0x2E, 0x1), 0xF }, 153 154 /* Unified Memory Controller MCA type */ 155 { SMCA_UMC, HWID_MCATYPE(0x96, 0x0), 0x3F }, 156 157 /* Parameter Block MCA type */ 158 { SMCA_PB, HWID_MCATYPE(0x05, 0x0), 0x1 }, 159 160 /* Platform Security Processor MCA type */ 161 { SMCA_PSP, HWID_MCATYPE(0xFF, 0x0), 0x1 }, 162 163 /* System Management Unit MCA type */ 164 { SMCA_SMU, HWID_MCATYPE(0x01, 0x0), 0x1 }, 165 }; 166 167 struct smca_bank smca_banks[MAX_NR_BANKS]; 168 EXPORT_SYMBOL_GPL(smca_banks); 169 170 /* 171 * In SMCA enabled processors, we can have multiple banks for a given IP type. 172 * So to define a unique name for each bank, we use a temp c-string to append 173 * the MCA_IPID[InstanceId] to type's name in get_name(). 174 * 175 * InstanceId is 32 bits which is 8 characters. Make sure MAX_MCATYPE_NAME_LEN 176 * is greater than 8 plus 1 (for underscore) plus length of longest type name. 177 */ 178 #define MAX_MCATYPE_NAME_LEN 30 179 static char buf_mcatype[MAX_MCATYPE_NAME_LEN]; 180 181 static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks); 182 static DEFINE_PER_CPU(unsigned int, bank_map); /* see which banks are on */ 183 184 static void amd_threshold_interrupt(void); 185 static void amd_deferred_error_interrupt(void); 186 187 static void default_deferred_error_interrupt(void) 188 { 189 pr_err("Unexpected deferred interrupt at vector %x\n", DEFERRED_ERROR_VECTOR); 190 } 191 void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt; 192 193 static void smca_configure(unsigned int bank, unsigned int cpu) 194 { 195 unsigned int i, hwid_mcatype; 196 struct smca_hwid *s_hwid; 197 u32 high, low; 198 u32 smca_config = MSR_AMD64_SMCA_MCx_CONFIG(bank); 199 200 /* Set appropriate bits in MCA_CONFIG */ 201 if (!rdmsr_safe(smca_config, &low, &high)) { 202 /* 203 * OS is required to set the MCAX bit to acknowledge that it is 204 * now using the new MSR ranges and new registers under each 205 * bank. It also means that the OS will configure deferred 206 * errors in the new MCx_CONFIG register. If the bit is not set, 207 * uncorrectable errors will cause a system panic. 208 * 209 * MCA_CONFIG[MCAX] is bit 32 (0 in the high portion of the MSR.) 210 */ 211 high |= BIT(0); 212 213 /* 214 * SMCA sets the Deferred Error Interrupt type per bank. 215 * 216 * MCA_CONFIG[DeferredIntTypeSupported] is bit 5, and tells us 217 * if the DeferredIntType bit field is available. 218 * 219 * MCA_CONFIG[DeferredIntType] is bits [38:37] ([6:5] in the 220 * high portion of the MSR). OS should set this to 0x1 to enable 221 * APIC based interrupt. First, check that no interrupt has been 222 * set. 223 */ 224 if ((low & BIT(5)) && !((high >> 5) & 0x3)) 225 high |= BIT(5); 226 227 wrmsr(smca_config, low, high); 228 } 229 230 /* Return early if this bank was already initialized. */ 231 if (smca_banks[bank].hwid) 232 return; 233 234 if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) { 235 pr_warn("Failed to read MCA_IPID for bank %d\n", bank); 236 return; 237 } 238 239 hwid_mcatype = HWID_MCATYPE(high & MCI_IPID_HWID, 240 (high & MCI_IPID_MCATYPE) >> 16); 241 242 for (i = 0; i < ARRAY_SIZE(smca_hwid_mcatypes); i++) { 243 s_hwid = &smca_hwid_mcatypes[i]; 244 if (hwid_mcatype == s_hwid->hwid_mcatype) { 245 smca_banks[bank].hwid = s_hwid; 246 smca_banks[bank].id = low; 247 smca_banks[bank].sysfs_id = s_hwid->count++; 248 break; 249 } 250 } 251 } 252 253 struct thresh_restart { 254 struct threshold_block *b; 255 int reset; 256 int set_lvt_off; 257 int lvt_off; 258 u16 old_limit; 259 }; 260 261 static inline bool is_shared_bank(int bank) 262 { 263 /* 264 * Scalable MCA provides for only one core to have access to the MSRs of 265 * a shared bank. 266 */ 267 if (mce_flags.smca) 268 return false; 269 270 /* Bank 4 is for northbridge reporting and is thus shared */ 271 return (bank == 4); 272 } 273 274 static const char *bank4_names(const struct threshold_block *b) 275 { 276 switch (b->address) { 277 /* MSR4_MISC0 */ 278 case 0x00000413: 279 return "dram"; 280 281 case 0xc0000408: 282 return "ht_links"; 283 284 case 0xc0000409: 285 return "l3_cache"; 286 287 default: 288 WARN(1, "Funny MSR: 0x%08x\n", b->address); 289 return ""; 290 } 291 }; 292 293 294 static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits) 295 { 296 /* 297 * bank 4 supports APIC LVT interrupts implicitly since forever. 298 */ 299 if (bank == 4) 300 return true; 301 302 /* 303 * IntP: interrupt present; if this bit is set, the thresholding 304 * bank can generate APIC LVT interrupts 305 */ 306 return msr_high_bits & BIT(28); 307 } 308 309 static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi) 310 { 311 int msr = (hi & MASK_LVTOFF_HI) >> 20; 312 313 if (apic < 0) { 314 pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt " 315 "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu, 316 b->bank, b->block, b->address, hi, lo); 317 return 0; 318 } 319 320 if (apic != msr) { 321 /* 322 * On SMCA CPUs, LVT offset is programmed at a different MSR, and 323 * the BIOS provides the value. The original field where LVT offset 324 * was set is reserved. Return early here: 325 */ 326 if (mce_flags.smca) 327 return 0; 328 329 pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d " 330 "for bank %d, block %d (MSR%08X=0x%x%08x)\n", 331 b->cpu, apic, b->bank, b->block, b->address, hi, lo); 332 return 0; 333 } 334 335 return 1; 336 }; 337 338 /* Reprogram MCx_MISC MSR behind this threshold bank. */ 339 static void threshold_restart_bank(void *_tr) 340 { 341 struct thresh_restart *tr = _tr; 342 u32 hi, lo; 343 344 rdmsr(tr->b->address, lo, hi); 345 346 if (tr->b->threshold_limit < (hi & THRESHOLD_MAX)) 347 tr->reset = 1; /* limit cannot be lower than err count */ 348 349 if (tr->reset) { /* reset err count and overflow bit */ 350 hi = 351 (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) | 352 (THRESHOLD_MAX - tr->b->threshold_limit); 353 } else if (tr->old_limit) { /* change limit w/o reset */ 354 int new_count = (hi & THRESHOLD_MAX) + 355 (tr->old_limit - tr->b->threshold_limit); 356 357 hi = (hi & ~MASK_ERR_COUNT_HI) | 358 (new_count & THRESHOLD_MAX); 359 } 360 361 /* clear IntType */ 362 hi &= ~MASK_INT_TYPE_HI; 363 364 if (!tr->b->interrupt_capable) 365 goto done; 366 367 if (tr->set_lvt_off) { 368 if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) { 369 /* set new lvt offset */ 370 hi &= ~MASK_LVTOFF_HI; 371 hi |= tr->lvt_off << 20; 372 } 373 } 374 375 if (tr->b->interrupt_enable) 376 hi |= INT_TYPE_APIC; 377 378 done: 379 380 hi |= MASK_COUNT_EN_HI; 381 wrmsr(tr->b->address, lo, hi); 382 } 383 384 static void mce_threshold_block_init(struct threshold_block *b, int offset) 385 { 386 struct thresh_restart tr = { 387 .b = b, 388 .set_lvt_off = 1, 389 .lvt_off = offset, 390 }; 391 392 b->threshold_limit = THRESHOLD_MAX; 393 threshold_restart_bank(&tr); 394 }; 395 396 static int setup_APIC_mce_threshold(int reserved, int new) 397 { 398 if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR, 399 APIC_EILVT_MSG_FIX, 0)) 400 return new; 401 402 return reserved; 403 } 404 405 static int setup_APIC_deferred_error(int reserved, int new) 406 { 407 if (reserved < 0 && !setup_APIC_eilvt(new, DEFERRED_ERROR_VECTOR, 408 APIC_EILVT_MSG_FIX, 0)) 409 return new; 410 411 return reserved; 412 } 413 414 static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c) 415 { 416 u32 low = 0, high = 0; 417 int def_offset = -1, def_new; 418 419 if (rdmsr_safe(MSR_CU_DEF_ERR, &low, &high)) 420 return; 421 422 def_new = (low & MASK_DEF_LVTOFF) >> 4; 423 if (!(low & MASK_DEF_LVTOFF)) { 424 pr_err(FW_BUG "Your BIOS is not setting up LVT offset 0x2 for deferred error IRQs correctly.\n"); 425 def_new = DEF_LVT_OFF; 426 low = (low & ~MASK_DEF_LVTOFF) | (DEF_LVT_OFF << 4); 427 } 428 429 def_offset = setup_APIC_deferred_error(def_offset, def_new); 430 if ((def_offset == def_new) && 431 (deferred_error_int_vector != amd_deferred_error_interrupt)) 432 deferred_error_int_vector = amd_deferred_error_interrupt; 433 434 if (!mce_flags.smca) 435 low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC; 436 437 wrmsr(MSR_CU_DEF_ERR, low, high); 438 } 439 440 static u32 smca_get_block_address(unsigned int bank, unsigned int block) 441 { 442 u32 low, high; 443 u32 addr = 0; 444 445 if (smca_get_bank_type(bank) == SMCA_RESERVED) 446 return addr; 447 448 if (!block) 449 return MSR_AMD64_SMCA_MCx_MISC(bank); 450 451 /* Check our cache first: */ 452 if (smca_bank_addrs[bank][block] != -1) 453 return smca_bank_addrs[bank][block]; 454 455 /* 456 * For SMCA enabled processors, BLKPTR field of the first MISC register 457 * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4). 458 */ 459 if (rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high)) 460 goto out; 461 462 if (!(low & MCI_CONFIG_MCAX)) 463 goto out; 464 465 if (!rdmsr_safe(MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) && 466 (low & MASK_BLKPTR_LO)) 467 addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1); 468 469 out: 470 smca_bank_addrs[bank][block] = addr; 471 return addr; 472 } 473 474 static u32 get_block_address(u32 current_addr, u32 low, u32 high, 475 unsigned int bank, unsigned int block) 476 { 477 u32 addr = 0, offset = 0; 478 479 if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS)) 480 return addr; 481 482 if (mce_flags.smca) 483 return smca_get_block_address(bank, block); 484 485 /* Fall back to method we used for older processors: */ 486 switch (block) { 487 case 0: 488 addr = msr_ops.misc(bank); 489 break; 490 case 1: 491 offset = ((low & MASK_BLKPTR_LO) >> 21); 492 if (offset) 493 addr = MCG_XBLK_ADDR + offset; 494 break; 495 default: 496 addr = ++current_addr; 497 } 498 return addr; 499 } 500 501 static int 502 prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr, 503 int offset, u32 misc_high) 504 { 505 unsigned int cpu = smp_processor_id(); 506 u32 smca_low, smca_high; 507 struct threshold_block b; 508 int new; 509 510 if (!block) 511 per_cpu(bank_map, cpu) |= (1 << bank); 512 513 memset(&b, 0, sizeof(b)); 514 b.cpu = cpu; 515 b.bank = bank; 516 b.block = block; 517 b.address = addr; 518 b.interrupt_capable = lvt_interrupt_supported(bank, misc_high); 519 520 if (!b.interrupt_capable) 521 goto done; 522 523 b.interrupt_enable = 1; 524 525 if (!mce_flags.smca) { 526 new = (misc_high & MASK_LVTOFF_HI) >> 20; 527 goto set_offset; 528 } 529 530 /* Gather LVT offset for thresholding: */ 531 if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high)) 532 goto out; 533 534 new = (smca_low & SMCA_THR_LVT_OFF) >> 12; 535 536 set_offset: 537 offset = setup_APIC_mce_threshold(offset, new); 538 if (offset == new) 539 thresholding_irq_en = true; 540 541 done: 542 mce_threshold_block_init(&b, offset); 543 544 out: 545 return offset; 546 } 547 548 /* 549 * Turn off MC4_MISC thresholding banks on all family 0x15 models since 550 * they're not supported there. 551 */ 552 void disable_err_thresholding(struct cpuinfo_x86 *c) 553 { 554 int i; 555 u64 hwcr; 556 bool need_toggle; 557 u32 msrs[] = { 558 0x00000413, /* MC4_MISC0 */ 559 0xc0000408, /* MC4_MISC1 */ 560 }; 561 562 if (c->x86 != 0x15) 563 return; 564 565 rdmsrl(MSR_K7_HWCR, hwcr); 566 567 /* McStatusWrEn has to be set */ 568 need_toggle = !(hwcr & BIT(18)); 569 570 if (need_toggle) 571 wrmsrl(MSR_K7_HWCR, hwcr | BIT(18)); 572 573 /* Clear CntP bit safely */ 574 for (i = 0; i < ARRAY_SIZE(msrs); i++) 575 msr_clear_bit(msrs[i], 62); 576 577 /* restore old settings */ 578 if (need_toggle) 579 wrmsrl(MSR_K7_HWCR, hwcr); 580 } 581 582 /* cpu init entry point, called from mce.c with preempt off */ 583 void mce_amd_feature_init(struct cpuinfo_x86 *c) 584 { 585 u32 low = 0, high = 0, address = 0; 586 unsigned int bank, block, cpu = smp_processor_id(); 587 int offset = -1; 588 589 disable_err_thresholding(c); 590 591 for (bank = 0; bank < mca_cfg.banks; ++bank) { 592 if (mce_flags.smca) 593 smca_configure(bank, cpu); 594 595 for (block = 0; block < NR_BLOCKS; ++block) { 596 address = get_block_address(address, low, high, bank, block); 597 if (!address) 598 break; 599 600 if (rdmsr_safe(address, &low, &high)) 601 break; 602 603 if (!(high & MASK_VALID_HI)) 604 continue; 605 606 if (!(high & MASK_CNTP_HI) || 607 (high & MASK_LOCKED_HI)) 608 continue; 609 610 offset = prepare_threshold_block(bank, block, address, offset, high); 611 } 612 } 613 614 if (mce_flags.succor) 615 deferred_error_interrupt_enable(c); 616 } 617 618 int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) 619 { 620 u64 dram_base_addr, dram_limit_addr, dram_hole_base; 621 /* We start from the normalized address */ 622 u64 ret_addr = norm_addr; 623 624 u32 tmp; 625 626 u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask; 627 u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets; 628 u8 intlv_addr_sel, intlv_addr_bit; 629 u8 num_intlv_bits, hashed_bit; 630 u8 lgcy_mmio_hole_en, base = 0; 631 u8 cs_mask, cs_id = 0; 632 bool hash_enabled = false; 633 634 /* Read D18F0x1B4 (DramOffset), check if base 1 is used. */ 635 if (amd_df_indirect_read(nid, 0, 0x1B4, umc, &tmp)) 636 goto out_err; 637 638 /* Remove HiAddrOffset from normalized address, if enabled: */ 639 if (tmp & BIT(0)) { 640 u64 hi_addr_offset = (tmp & GENMASK_ULL(31, 20)) << 8; 641 642 if (norm_addr >= hi_addr_offset) { 643 ret_addr -= hi_addr_offset; 644 base = 1; 645 } 646 } 647 648 /* Read D18F0x110 (DramBaseAddress). */ 649 if (amd_df_indirect_read(nid, 0, 0x110 + (8 * base), umc, &tmp)) 650 goto out_err; 651 652 /* Check if address range is valid. */ 653 if (!(tmp & BIT(0))) { 654 pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n", 655 __func__, tmp); 656 goto out_err; 657 } 658 659 lgcy_mmio_hole_en = tmp & BIT(1); 660 intlv_num_chan = (tmp >> 4) & 0xF; 661 intlv_addr_sel = (tmp >> 8) & 0x7; 662 dram_base_addr = (tmp & GENMASK_ULL(31, 12)) << 16; 663 664 /* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */ 665 if (intlv_addr_sel > 3) { 666 pr_err("%s: Invalid interleave address select %d.\n", 667 __func__, intlv_addr_sel); 668 goto out_err; 669 } 670 671 /* Read D18F0x114 (DramLimitAddress). */ 672 if (amd_df_indirect_read(nid, 0, 0x114 + (8 * base), umc, &tmp)) 673 goto out_err; 674 675 intlv_num_sockets = (tmp >> 8) & 0x1; 676 intlv_num_dies = (tmp >> 10) & 0x3; 677 dram_limit_addr = ((tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0); 678 679 intlv_addr_bit = intlv_addr_sel + 8; 680 681 /* Re-use intlv_num_chan by setting it equal to log2(#channels) */ 682 switch (intlv_num_chan) { 683 case 0: intlv_num_chan = 0; break; 684 case 1: intlv_num_chan = 1; break; 685 case 3: intlv_num_chan = 2; break; 686 case 5: intlv_num_chan = 3; break; 687 case 7: intlv_num_chan = 4; break; 688 689 case 8: intlv_num_chan = 1; 690 hash_enabled = true; 691 break; 692 default: 693 pr_err("%s: Invalid number of interleaved channels %d.\n", 694 __func__, intlv_num_chan); 695 goto out_err; 696 } 697 698 num_intlv_bits = intlv_num_chan; 699 700 if (intlv_num_dies > 2) { 701 pr_err("%s: Invalid number of interleaved nodes/dies %d.\n", 702 __func__, intlv_num_dies); 703 goto out_err; 704 } 705 706 num_intlv_bits += intlv_num_dies; 707 708 /* Add a bit if sockets are interleaved. */ 709 num_intlv_bits += intlv_num_sockets; 710 711 /* Assert num_intlv_bits <= 4 */ 712 if (num_intlv_bits > 4) { 713 pr_err("%s: Invalid interleave bits %d.\n", 714 __func__, num_intlv_bits); 715 goto out_err; 716 } 717 718 if (num_intlv_bits > 0) { 719 u64 temp_addr_x, temp_addr_i, temp_addr_y; 720 u8 die_id_bit, sock_id_bit, cs_fabric_id; 721 722 /* 723 * Read FabricBlockInstanceInformation3_CS[BlockFabricID]. 724 * This is the fabric id for this coherent slave. Use 725 * umc/channel# as instance id of the coherent slave 726 * for FICAA. 727 */ 728 if (amd_df_indirect_read(nid, 0, 0x50, umc, &tmp)) 729 goto out_err; 730 731 cs_fabric_id = (tmp >> 8) & 0xFF; 732 die_id_bit = 0; 733 734 /* If interleaved over more than 1 channel: */ 735 if (intlv_num_chan) { 736 die_id_bit = intlv_num_chan; 737 cs_mask = (1 << die_id_bit) - 1; 738 cs_id = cs_fabric_id & cs_mask; 739 } 740 741 sock_id_bit = die_id_bit; 742 743 /* Read D18F1x208 (SystemFabricIdMask). */ 744 if (intlv_num_dies || intlv_num_sockets) 745 if (amd_df_indirect_read(nid, 1, 0x208, umc, &tmp)) 746 goto out_err; 747 748 /* If interleaved over more than 1 die. */ 749 if (intlv_num_dies) { 750 sock_id_bit = die_id_bit + intlv_num_dies; 751 die_id_shift = (tmp >> 24) & 0xF; 752 die_id_mask = (tmp >> 8) & 0xFF; 753 754 cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit; 755 } 756 757 /* If interleaved over more than 1 socket. */ 758 if (intlv_num_sockets) { 759 socket_id_shift = (tmp >> 28) & 0xF; 760 socket_id_mask = (tmp >> 16) & 0xFF; 761 762 cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit; 763 } 764 765 /* 766 * The pre-interleaved address consists of XXXXXXIIIYYYYY 767 * where III is the ID for this CS, and XXXXXXYYYYY are the 768 * address bits from the post-interleaved address. 769 * "num_intlv_bits" has been calculated to tell us how many "I" 770 * bits there are. "intlv_addr_bit" tells us how many "Y" bits 771 * there are (where "I" starts). 772 */ 773 temp_addr_y = ret_addr & GENMASK_ULL(intlv_addr_bit-1, 0); 774 temp_addr_i = (cs_id << intlv_addr_bit); 775 temp_addr_x = (ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits; 776 ret_addr = temp_addr_x | temp_addr_i | temp_addr_y; 777 } 778 779 /* Add dram base address */ 780 ret_addr += dram_base_addr; 781 782 /* If legacy MMIO hole enabled */ 783 if (lgcy_mmio_hole_en) { 784 if (amd_df_indirect_read(nid, 0, 0x104, umc, &tmp)) 785 goto out_err; 786 787 dram_hole_base = tmp & GENMASK(31, 24); 788 if (ret_addr >= dram_hole_base) 789 ret_addr += (BIT_ULL(32) - dram_hole_base); 790 } 791 792 if (hash_enabled) { 793 /* Save some parentheses and grab ls-bit at the end. */ 794 hashed_bit = (ret_addr >> 12) ^ 795 (ret_addr >> 18) ^ 796 (ret_addr >> 21) ^ 797 (ret_addr >> 30) ^ 798 cs_id; 799 800 hashed_bit &= BIT(0); 801 802 if (hashed_bit != ((ret_addr >> intlv_addr_bit) & BIT(0))) 803 ret_addr ^= BIT(intlv_addr_bit); 804 } 805 806 /* Is calculated system address is above DRAM limit address? */ 807 if (ret_addr > dram_limit_addr) 808 goto out_err; 809 810 *sys_addr = ret_addr; 811 return 0; 812 813 out_err: 814 return -EINVAL; 815 } 816 EXPORT_SYMBOL_GPL(umc_normaddr_to_sysaddr); 817 818 bool amd_mce_is_memory_error(struct mce *m) 819 { 820 /* ErrCodeExt[20:16] */ 821 u8 xec = (m->status >> 16) & 0x1f; 822 823 if (mce_flags.smca) 824 return smca_get_bank_type(m->bank) == SMCA_UMC && xec == 0x0; 825 826 return m->bank == 4 && xec == 0x8; 827 } 828 829 static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc) 830 { 831 struct mce m; 832 833 mce_setup(&m); 834 835 m.status = status; 836 m.misc = misc; 837 m.bank = bank; 838 m.tsc = rdtsc(); 839 840 if (m.status & MCI_STATUS_ADDRV) { 841 m.addr = addr; 842 843 /* 844 * Extract [55:<lsb>] where lsb is the least significant 845 * *valid* bit of the address bits. 846 */ 847 if (mce_flags.smca) { 848 u8 lsb = (m.addr >> 56) & 0x3f; 849 850 m.addr &= GENMASK_ULL(55, lsb); 851 } 852 } 853 854 if (mce_flags.smca) { 855 rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), m.ipid); 856 857 if (m.status & MCI_STATUS_SYNDV) 858 rdmsrl(MSR_AMD64_SMCA_MCx_SYND(bank), m.synd); 859 } 860 861 mce_log(&m); 862 } 863 864 asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(struct pt_regs *regs) 865 { 866 entering_irq(); 867 trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR); 868 inc_irq_stat(irq_deferred_error_count); 869 deferred_error_int_vector(); 870 trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR); 871 exiting_ack_irq(); 872 } 873 874 /* 875 * Returns true if the logged error is deferred. False, otherwise. 876 */ 877 static inline bool 878 _log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc) 879 { 880 u64 status, addr = 0; 881 882 rdmsrl(msr_stat, status); 883 if (!(status & MCI_STATUS_VAL)) 884 return false; 885 886 if (status & MCI_STATUS_ADDRV) 887 rdmsrl(msr_addr, addr); 888 889 __log_error(bank, status, addr, misc); 890 891 wrmsrl(msr_stat, 0); 892 893 return status & MCI_STATUS_DEFERRED; 894 } 895 896 /* 897 * We have three scenarios for checking for Deferred errors: 898 * 899 * 1) Non-SMCA systems check MCA_STATUS and log error if found. 900 * 2) SMCA systems check MCA_STATUS. If error is found then log it and also 901 * clear MCA_DESTAT. 902 * 3) SMCA systems check MCA_DESTAT, if error was not found in MCA_STATUS, and 903 * log it. 904 */ 905 static void log_error_deferred(unsigned int bank) 906 { 907 bool defrd; 908 909 defrd = _log_error_bank(bank, msr_ops.status(bank), 910 msr_ops.addr(bank), 0); 911 912 if (!mce_flags.smca) 913 return; 914 915 /* Clear MCA_DESTAT if we logged the deferred error from MCA_STATUS. */ 916 if (defrd) { 917 wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0); 918 return; 919 } 920 921 /* 922 * Only deferred errors are logged in MCA_DE{STAT,ADDR} so just check 923 * for a valid error. 924 */ 925 _log_error_bank(bank, MSR_AMD64_SMCA_MCx_DESTAT(bank), 926 MSR_AMD64_SMCA_MCx_DEADDR(bank), 0); 927 } 928 929 /* APIC interrupt handler for deferred errors */ 930 static void amd_deferred_error_interrupt(void) 931 { 932 unsigned int bank; 933 934 for (bank = 0; bank < mca_cfg.banks; ++bank) 935 log_error_deferred(bank); 936 } 937 938 static void log_error_thresholding(unsigned int bank, u64 misc) 939 { 940 _log_error_bank(bank, msr_ops.status(bank), msr_ops.addr(bank), misc); 941 } 942 943 static void log_and_reset_block(struct threshold_block *block) 944 { 945 struct thresh_restart tr; 946 u32 low = 0, high = 0; 947 948 if (!block) 949 return; 950 951 if (rdmsr_safe(block->address, &low, &high)) 952 return; 953 954 if (!(high & MASK_OVERFLOW_HI)) 955 return; 956 957 /* Log the MCE which caused the threshold event. */ 958 log_error_thresholding(block->bank, ((u64)high << 32) | low); 959 960 /* Reset threshold block after logging error. */ 961 memset(&tr, 0, sizeof(tr)); 962 tr.b = block; 963 threshold_restart_bank(&tr); 964 } 965 966 /* 967 * Threshold interrupt handler will service THRESHOLD_APIC_VECTOR. The interrupt 968 * goes off when error_count reaches threshold_limit. 969 */ 970 static void amd_threshold_interrupt(void) 971 { 972 struct threshold_block *first_block = NULL, *block = NULL, *tmp = NULL; 973 unsigned int bank, cpu = smp_processor_id(); 974 975 for (bank = 0; bank < mca_cfg.banks; ++bank) { 976 if (!(per_cpu(bank_map, cpu) & (1 << bank))) 977 continue; 978 979 first_block = per_cpu(threshold_banks, cpu)[bank]->blocks; 980 if (!first_block) 981 continue; 982 983 /* 984 * The first block is also the head of the list. Check it first 985 * before iterating over the rest. 986 */ 987 log_and_reset_block(first_block); 988 list_for_each_entry_safe(block, tmp, &first_block->miscj, miscj) 989 log_and_reset_block(block); 990 } 991 } 992 993 /* 994 * Sysfs Interface 995 */ 996 997 struct threshold_attr { 998 struct attribute attr; 999 ssize_t (*show) (struct threshold_block *, char *); 1000 ssize_t (*store) (struct threshold_block *, const char *, size_t count); 1001 }; 1002 1003 #define SHOW_FIELDS(name) \ 1004 static ssize_t show_ ## name(struct threshold_block *b, char *buf) \ 1005 { \ 1006 return sprintf(buf, "%lu\n", (unsigned long) b->name); \ 1007 } 1008 SHOW_FIELDS(interrupt_enable) 1009 SHOW_FIELDS(threshold_limit) 1010 1011 static ssize_t 1012 store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size) 1013 { 1014 struct thresh_restart tr; 1015 unsigned long new; 1016 1017 if (!b->interrupt_capable) 1018 return -EINVAL; 1019 1020 if (kstrtoul(buf, 0, &new) < 0) 1021 return -EINVAL; 1022 1023 b->interrupt_enable = !!new; 1024 1025 memset(&tr, 0, sizeof(tr)); 1026 tr.b = b; 1027 1028 smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1); 1029 1030 return size; 1031 } 1032 1033 static ssize_t 1034 store_threshold_limit(struct threshold_block *b, const char *buf, size_t size) 1035 { 1036 struct thresh_restart tr; 1037 unsigned long new; 1038 1039 if (kstrtoul(buf, 0, &new) < 0) 1040 return -EINVAL; 1041 1042 if (new > THRESHOLD_MAX) 1043 new = THRESHOLD_MAX; 1044 if (new < 1) 1045 new = 1; 1046 1047 memset(&tr, 0, sizeof(tr)); 1048 tr.old_limit = b->threshold_limit; 1049 b->threshold_limit = new; 1050 tr.b = b; 1051 1052 smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1); 1053 1054 return size; 1055 } 1056 1057 static ssize_t show_error_count(struct threshold_block *b, char *buf) 1058 { 1059 u32 lo, hi; 1060 1061 rdmsr_on_cpu(b->cpu, b->address, &lo, &hi); 1062 1063 return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) - 1064 (THRESHOLD_MAX - b->threshold_limit))); 1065 } 1066 1067 static struct threshold_attr error_count = { 1068 .attr = {.name = __stringify(error_count), .mode = 0444 }, 1069 .show = show_error_count, 1070 }; 1071 1072 #define RW_ATTR(val) \ 1073 static struct threshold_attr val = { \ 1074 .attr = {.name = __stringify(val), .mode = 0644 }, \ 1075 .show = show_## val, \ 1076 .store = store_## val, \ 1077 }; 1078 1079 RW_ATTR(interrupt_enable); 1080 RW_ATTR(threshold_limit); 1081 1082 static struct attribute *default_attrs[] = { 1083 &threshold_limit.attr, 1084 &error_count.attr, 1085 NULL, /* possibly interrupt_enable if supported, see below */ 1086 NULL, 1087 }; 1088 1089 #define to_block(k) container_of(k, struct threshold_block, kobj) 1090 #define to_attr(a) container_of(a, struct threshold_attr, attr) 1091 1092 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) 1093 { 1094 struct threshold_block *b = to_block(kobj); 1095 struct threshold_attr *a = to_attr(attr); 1096 ssize_t ret; 1097 1098 ret = a->show ? a->show(b, buf) : -EIO; 1099 1100 return ret; 1101 } 1102 1103 static ssize_t store(struct kobject *kobj, struct attribute *attr, 1104 const char *buf, size_t count) 1105 { 1106 struct threshold_block *b = to_block(kobj); 1107 struct threshold_attr *a = to_attr(attr); 1108 ssize_t ret; 1109 1110 ret = a->store ? a->store(b, buf, count) : -EIO; 1111 1112 return ret; 1113 } 1114 1115 static const struct sysfs_ops threshold_ops = { 1116 .show = show, 1117 .store = store, 1118 }; 1119 1120 static struct kobj_type threshold_ktype = { 1121 .sysfs_ops = &threshold_ops, 1122 .default_attrs = default_attrs, 1123 }; 1124 1125 static const char *get_name(unsigned int bank, struct threshold_block *b) 1126 { 1127 enum smca_bank_types bank_type; 1128 1129 if (!mce_flags.smca) { 1130 if (b && bank == 4) 1131 return bank4_names(b); 1132 1133 return th_names[bank]; 1134 } 1135 1136 bank_type = smca_get_bank_type(bank); 1137 if (bank_type >= N_SMCA_BANK_TYPES) 1138 return NULL; 1139 1140 if (b && bank_type == SMCA_UMC) { 1141 if (b->block < ARRAY_SIZE(smca_umc_block_names)) 1142 return smca_umc_block_names[b->block]; 1143 return NULL; 1144 } 1145 1146 if (smca_banks[bank].hwid->count == 1) 1147 return smca_get_name(bank_type); 1148 1149 snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN, 1150 "%s_%x", smca_get_name(bank_type), 1151 smca_banks[bank].sysfs_id); 1152 return buf_mcatype; 1153 } 1154 1155 static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank, 1156 unsigned int block, u32 address) 1157 { 1158 struct threshold_block *b = NULL; 1159 u32 low, high; 1160 int err; 1161 1162 if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS)) 1163 return 0; 1164 1165 if (rdmsr_safe_on_cpu(cpu, address, &low, &high)) 1166 return 0; 1167 1168 if (!(high & MASK_VALID_HI)) { 1169 if (block) 1170 goto recurse; 1171 else 1172 return 0; 1173 } 1174 1175 if (!(high & MASK_CNTP_HI) || 1176 (high & MASK_LOCKED_HI)) 1177 goto recurse; 1178 1179 b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL); 1180 if (!b) 1181 return -ENOMEM; 1182 1183 b->block = block; 1184 b->bank = bank; 1185 b->cpu = cpu; 1186 b->address = address; 1187 b->interrupt_enable = 0; 1188 b->interrupt_capable = lvt_interrupt_supported(bank, high); 1189 b->threshold_limit = THRESHOLD_MAX; 1190 1191 if (b->interrupt_capable) { 1192 threshold_ktype.default_attrs[2] = &interrupt_enable.attr; 1193 b->interrupt_enable = 1; 1194 } else { 1195 threshold_ktype.default_attrs[2] = NULL; 1196 } 1197 1198 INIT_LIST_HEAD(&b->miscj); 1199 1200 if (per_cpu(threshold_banks, cpu)[bank]->blocks) { 1201 list_add(&b->miscj, 1202 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj); 1203 } else { 1204 per_cpu(threshold_banks, cpu)[bank]->blocks = b; 1205 } 1206 1207 err = kobject_init_and_add(&b->kobj, &threshold_ktype, 1208 per_cpu(threshold_banks, cpu)[bank]->kobj, 1209 get_name(bank, b)); 1210 if (err) 1211 goto out_free; 1212 recurse: 1213 address = get_block_address(address, low, high, bank, ++block); 1214 if (!address) 1215 return 0; 1216 1217 err = allocate_threshold_blocks(cpu, bank, block, address); 1218 if (err) 1219 goto out_free; 1220 1221 if (b) 1222 kobject_uevent(&b->kobj, KOBJ_ADD); 1223 1224 return err; 1225 1226 out_free: 1227 if (b) { 1228 kobject_put(&b->kobj); 1229 list_del(&b->miscj); 1230 kfree(b); 1231 } 1232 return err; 1233 } 1234 1235 static int __threshold_add_blocks(struct threshold_bank *b) 1236 { 1237 struct list_head *head = &b->blocks->miscj; 1238 struct threshold_block *pos = NULL; 1239 struct threshold_block *tmp = NULL; 1240 int err = 0; 1241 1242 err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name); 1243 if (err) 1244 return err; 1245 1246 list_for_each_entry_safe(pos, tmp, head, miscj) { 1247 1248 err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name); 1249 if (err) { 1250 list_for_each_entry_safe_reverse(pos, tmp, head, miscj) 1251 kobject_del(&pos->kobj); 1252 1253 return err; 1254 } 1255 } 1256 return err; 1257 } 1258 1259 static int threshold_create_bank(unsigned int cpu, unsigned int bank) 1260 { 1261 struct device *dev = per_cpu(mce_device, cpu); 1262 struct amd_northbridge *nb = NULL; 1263 struct threshold_bank *b = NULL; 1264 const char *name = get_name(bank, NULL); 1265 int err = 0; 1266 1267 if (!dev) 1268 return -ENODEV; 1269 1270 if (is_shared_bank(bank)) { 1271 nb = node_to_amd_nb(amd_get_nb_id(cpu)); 1272 1273 /* threshold descriptor already initialized on this node? */ 1274 if (nb && nb->bank4) { 1275 /* yes, use it */ 1276 b = nb->bank4; 1277 err = kobject_add(b->kobj, &dev->kobj, name); 1278 if (err) 1279 goto out; 1280 1281 per_cpu(threshold_banks, cpu)[bank] = b; 1282 refcount_inc(&b->cpus); 1283 1284 err = __threshold_add_blocks(b); 1285 1286 goto out; 1287 } 1288 } 1289 1290 b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL); 1291 if (!b) { 1292 err = -ENOMEM; 1293 goto out; 1294 } 1295 1296 b->kobj = kobject_create_and_add(name, &dev->kobj); 1297 if (!b->kobj) { 1298 err = -EINVAL; 1299 goto out_free; 1300 } 1301 1302 per_cpu(threshold_banks, cpu)[bank] = b; 1303 1304 if (is_shared_bank(bank)) { 1305 refcount_set(&b->cpus, 1); 1306 1307 /* nb is already initialized, see above */ 1308 if (nb) { 1309 WARN_ON(nb->bank4); 1310 nb->bank4 = b; 1311 } 1312 } 1313 1314 err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank)); 1315 if (!err) 1316 goto out; 1317 1318 out_free: 1319 kfree(b); 1320 1321 out: 1322 return err; 1323 } 1324 1325 static void deallocate_threshold_block(unsigned int cpu, 1326 unsigned int bank) 1327 { 1328 struct threshold_block *pos = NULL; 1329 struct threshold_block *tmp = NULL; 1330 struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank]; 1331 1332 if (!head) 1333 return; 1334 1335 list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) { 1336 kobject_put(&pos->kobj); 1337 list_del(&pos->miscj); 1338 kfree(pos); 1339 } 1340 1341 kfree(per_cpu(threshold_banks, cpu)[bank]->blocks); 1342 per_cpu(threshold_banks, cpu)[bank]->blocks = NULL; 1343 } 1344 1345 static void __threshold_remove_blocks(struct threshold_bank *b) 1346 { 1347 struct threshold_block *pos = NULL; 1348 struct threshold_block *tmp = NULL; 1349 1350 kobject_del(b->kobj); 1351 1352 list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj) 1353 kobject_del(&pos->kobj); 1354 } 1355 1356 static void threshold_remove_bank(unsigned int cpu, int bank) 1357 { 1358 struct amd_northbridge *nb; 1359 struct threshold_bank *b; 1360 1361 b = per_cpu(threshold_banks, cpu)[bank]; 1362 if (!b) 1363 return; 1364 1365 if (!b->blocks) 1366 goto free_out; 1367 1368 if (is_shared_bank(bank)) { 1369 if (!refcount_dec_and_test(&b->cpus)) { 1370 __threshold_remove_blocks(b); 1371 per_cpu(threshold_banks, cpu)[bank] = NULL; 1372 return; 1373 } else { 1374 /* 1375 * the last CPU on this node using the shared bank is 1376 * going away, remove that bank now. 1377 */ 1378 nb = node_to_amd_nb(amd_get_nb_id(cpu)); 1379 nb->bank4 = NULL; 1380 } 1381 } 1382 1383 deallocate_threshold_block(cpu, bank); 1384 1385 free_out: 1386 kobject_del(b->kobj); 1387 kobject_put(b->kobj); 1388 kfree(b); 1389 per_cpu(threshold_banks, cpu)[bank] = NULL; 1390 } 1391 1392 int mce_threshold_remove_device(unsigned int cpu) 1393 { 1394 unsigned int bank; 1395 1396 for (bank = 0; bank < mca_cfg.banks; ++bank) { 1397 if (!(per_cpu(bank_map, cpu) & (1 << bank))) 1398 continue; 1399 threshold_remove_bank(cpu, bank); 1400 } 1401 kfree(per_cpu(threshold_banks, cpu)); 1402 per_cpu(threshold_banks, cpu) = NULL; 1403 return 0; 1404 } 1405 1406 /* create dir/files for all valid threshold banks */ 1407 int mce_threshold_create_device(unsigned int cpu) 1408 { 1409 unsigned int bank; 1410 struct threshold_bank **bp; 1411 int err = 0; 1412 1413 bp = per_cpu(threshold_banks, cpu); 1414 if (bp) 1415 return 0; 1416 1417 bp = kcalloc(mca_cfg.banks, sizeof(struct threshold_bank *), 1418 GFP_KERNEL); 1419 if (!bp) 1420 return -ENOMEM; 1421 1422 per_cpu(threshold_banks, cpu) = bp; 1423 1424 for (bank = 0; bank < mca_cfg.banks; ++bank) { 1425 if (!(per_cpu(bank_map, cpu) & (1 << bank))) 1426 continue; 1427 err = threshold_create_bank(cpu, bank); 1428 if (err) 1429 goto err; 1430 } 1431 return err; 1432 err: 1433 mce_threshold_remove_device(cpu); 1434 return err; 1435 } 1436 1437 static __init int threshold_init_device(void) 1438 { 1439 unsigned lcpu = 0; 1440 1441 /* to hit CPUs online before the notifier is up */ 1442 for_each_online_cpu(lcpu) { 1443 int err = mce_threshold_create_device(lcpu); 1444 1445 if (err) 1446 return err; 1447 } 1448 1449 if (thresholding_irq_en) 1450 mce_threshold_vector = amd_threshold_interrupt; 1451 1452 return 0; 1453 } 1454 /* 1455 * there are 3 funcs which need to be _initcalled in a logic sequence: 1456 * 1. xen_late_init_mcelog 1457 * 2. mcheck_init_device 1458 * 3. threshold_init_device 1459 * 1460 * xen_late_init_mcelog must register xen_mce_chrdev_device before 1461 * native mce_chrdev_device registration if running under xen platform; 1462 * 1463 * mcheck_init_device should be inited before threshold_init_device to 1464 * initialize mce_device, otherwise a NULL ptr dereference will cause panic. 1465 * 1466 * so we use following _initcalls 1467 * 1. device_initcall(xen_late_init_mcelog); 1468 * 2. device_initcall_sync(mcheck_init_device); 1469 * 3. late_initcall(threshold_init_device); 1470 * 1471 * when running under xen, the initcall order is 1,2,3; 1472 * on baremetal, we skip 1 and we do only 2 and 3. 1473 */ 1474 late_initcall(threshold_init_device); 1475