// SPDX-License-Identifier: GPL-2.0-only
/*
 *  (c) 2005-2016 Advanced Micro Devices, Inc.
 *
 *  Written by Jacob Shin - AMD, Inc.
 *  Maintained by: Borislav Petkov <bp@alien8.de>
 *
 *  All MC4_MISCi registers are shared between cores on a node.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/string.h>

#include <asm/amd_nb.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>

#include "internal.h"

#define NR_BLOCKS         5
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400

/* Deferred error settings */
#define MSR_CU_DEF_ERR		0xC0000410
#define MASK_DEF_LVTOFF		0x000000F0
#define MASK_DEF_INT_TYPE	0x00000006
#define DEF_LVT_OFF		0x2
#define DEF_INT_TYPE_APIC	0x2

/* Scalable MCA: */

/* Threshold LVT offset is at MSR0xC0000410[15:12] */
#define SMCA_THR_LVT_OFF	0xF000

static bool thresholding_irq_en;

static const char * const th_names[] = {
	"load_store",
	"insn_fetch",
	"combined_unit",
	"decode_unit",
	"northbridge",
	"execution_unit",
};

static const char * const smca_umc_block_names[] = {
	"dram_ecc",
	"misc_umc"
};

struct smca_bank_name {
	const char *name;	/* Short name for sysfs */
	const char *long_name;	/* Long name for pretty-printing */
};

static struct smca_bank_name smca_names[] = {
	[SMCA_LS]	= { "load_store",	"Load Store Unit" },
	[SMCA_IF]	= { "insn_fetch",	"Instruction Fetch Unit" },
	[SMCA_L2_CACHE]	= { "l2_cache",		"L2 Cache" },
	[SMCA_DE]	= { "decode_unit",	"Decode Unit" },
	[SMCA_RESERVED]	= { "reserved",		"Reserved" },
	[SMCA_EX]	= { "execution_unit",	"Execution Unit" },
	[SMCA_FP]	= { "floating_point",	"Floating Point Unit" },
	[SMCA_L3_CACHE]	= { "l3_cache",		"L3 Cache" },
	[SMCA_CS]	= { "coherent_slave",	"Coherent Slave" },
	[SMCA_CS_V2]	= { "coherent_slave",	"Coherent Slave" },
	[SMCA_PIE]	= { "pie",		"Power, Interrupts, etc." },
	[SMCA_UMC]	= { "umc",		"Unified Memory Controller" },
	[SMCA_PB]	= { "param_block",	"Parameter Block" },
	[SMCA_PSP]	= { "psp",		"Platform Security Processor" },
	[SMCA_PSP_V2]	= { "psp",		"Platform Security Processor" },
	[SMCA_SMU]	= { "smu",		"System Management Unit" },
	[SMCA_SMU_V2]	= { "smu",		"System Management Unit" },
	[SMCA_MP5]	= { "mp5",		"Microprocessor 5 Unit" },
	[SMCA_NBIO]	= { "nbio",		"Northbridge IO Unit" },
	[SMCA_PCIE]	= { "pcie",		"PCI Express Unit" },
};

static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init =
{
	[0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
};
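/*
 * smca_bank_addrs[][] caches the per-bank MISC block addresses resolved by
 * smca_get_block_address(); -1 marks entries that have not been looked up yet.
 */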

static const char *smca_get_name(enum smca_bank_types t)
{
	if (t >= N_SMCA_BANK_TYPES)
		return NULL;

	return smca_names[t].name;
}

const char *smca_get_long_name(enum smca_bank_types t)
{
	if (t >= N_SMCA_BANK_TYPES)
		return NULL;

	return smca_names[t].long_name;
}
EXPORT_SYMBOL_GPL(smca_get_long_name);

static enum smca_bank_types smca_get_bank_type(unsigned int bank)
{
	struct smca_bank *b;

	if (bank >= MAX_NR_BANKS)
		return N_SMCA_BANK_TYPES;

	b = &smca_banks[bank];
	if (!b->hwid)
		return N_SMCA_BANK_TYPES;

	return b->hwid->bank_type;
}

static struct smca_hwid smca_hwid_mcatypes[] = {
	/* { bank_type, hwid_mcatype, xec_bitmap } */

	/* Reserved type */
	{ SMCA_RESERVED, HWID_MCATYPE(0x00, 0x0), 0x0 },

	/* ZN Core (HWID=0xB0) MCA types */
	{ SMCA_LS,	 HWID_MCATYPE(0xB0, 0x0), 0x1FFFFF },
	{ SMCA_IF,	 HWID_MCATYPE(0xB0, 0x1), 0x3FFF },
	{ SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2), 0xF },
	{ SMCA_DE,	 HWID_MCATYPE(0xB0, 0x3), 0x1FF },
	/* HWID 0xB0 MCATYPE 0x4 is Reserved */
	{ SMCA_EX,	 HWID_MCATYPE(0xB0, 0x5), 0xFFF },
	{ SMCA_FP,	 HWID_MCATYPE(0xB0, 0x6), 0x7F },
	{ SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7), 0xFF },

	/* Data Fabric MCA types */
	{ SMCA_CS,	 HWID_MCATYPE(0x2E, 0x0), 0x1FF },
	{ SMCA_PIE,	 HWID_MCATYPE(0x2E, 0x1), 0x1F },
	{ SMCA_CS_V2,	 HWID_MCATYPE(0x2E, 0x2), 0x3FFF },

	/* Unified Memory Controller MCA type */
	{ SMCA_UMC,	 HWID_MCATYPE(0x96, 0x0), 0xFF },

	/* Parameter Block MCA type */
	{ SMCA_PB,	 HWID_MCATYPE(0x05, 0x0), 0x1 },

	/* Platform Security Processor MCA type */
	{ SMCA_PSP,	 HWID_MCATYPE(0xFF, 0x0), 0x1 },
	{ SMCA_PSP_V2,	 HWID_MCATYPE(0xFF, 0x1), 0x3FFFF },

	/* System Management Unit MCA type */
	{ SMCA_SMU,	 HWID_MCATYPE(0x01, 0x0), 0x1 },
	{ SMCA_SMU_V2,	 HWID_MCATYPE(0x01, 0x1), 0x7FF },

	/* Microprocessor 5 Unit MCA type */
	{ SMCA_MP5,	 HWID_MCATYPE(0x01, 0x2), 0x3FF },

	/* Northbridge IO Unit MCA type */
	{ SMCA_NBIO,	 HWID_MCATYPE(0x18, 0x0), 0x1F },

	/* PCI Express Unit MCA type */
	{ SMCA_PCIE,	 HWID_MCATYPE(0x46, 0x0), 0x1F },
};

struct smca_bank smca_banks[MAX_NR_BANKS];
EXPORT_SYMBOL_GPL(smca_banks);

/*
 * In SMCA enabled processors, we can have multiple banks for a given IP type.
 * So to define a unique name for each bank, we use a temp c-string to append
 * the MCA_IPID[InstanceId] to the type's name in get_name().
 *
 * InstanceId is 32 bits which is 8 characters. Make sure MAX_MCATYPE_NAME_LEN
 * is greater than 8 plus 1 (for the underscore) plus the length of the
 * longest type name.
 */
#define MAX_MCATYPE_NAME_LEN	30
static char buf_mcatype[MAX_MCATYPE_NAME_LEN];

static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
static DEFINE_PER_CPU(unsigned int, bank_map);	/* see which banks are on */

static void amd_threshold_interrupt(void);
static void amd_deferred_error_interrupt(void);

static void default_deferred_error_interrupt(void)
{
	pr_err("Unexpected deferred interrupt at vector %x\n", DEFERRED_ERROR_VECTOR);
}
void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;

static void smca_configure(unsigned int bank, unsigned int cpu)
{
	unsigned int i, hwid_mcatype;
	struct smca_hwid *s_hwid;
	u32 high, low;
	u32 smca_config = MSR_AMD64_SMCA_MCx_CONFIG(bank);

	/* Set appropriate bits in MCA_CONFIG */
	if (!rdmsr_safe(smca_config, &low, &high)) {
		/*
		 * OS is required to set the MCAX bit to acknowledge that it is
		 * now using the new MSR ranges and new registers under each
		 * bank. It also means that the OS will configure deferred
		 * errors in the new MCx_CONFIG register. If the bit is not set,
		 * uncorrectable errors will cause a system panic.
		 *
		 * MCA_CONFIG[MCAX] is bit 32 (0 in the high portion of the MSR.)
		 */
		high |= BIT(0);

		/*
		 * SMCA sets the Deferred Error Interrupt type per bank.
		 *
		 * MCA_CONFIG[DeferredIntTypeSupported] is bit 5, and tells us
		 * if the DeferredIntType bit field is available.
		 *
		 * MCA_CONFIG[DeferredIntType] is bits [38:37] ([6:5] in the
		 * high portion of the MSR). OS should set this to 0x1 to enable
		 * APIC based interrupt. First, check that no interrupt has been
		 * set.
		 */
		if ((low & BIT(5)) && !((high >> 5) & 0x3))
			high |= BIT(5);

		wrmsr(smca_config, low, high);
	}

	/* Return early if this bank was already initialized. */
	if (smca_banks[bank].hwid)
		return;

	if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
		pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
		return;
	}

	hwid_mcatype = HWID_MCATYPE(high & MCI_IPID_HWID,
				    (high & MCI_IPID_MCATYPE) >> 16);

	for (i = 0; i < ARRAY_SIZE(smca_hwid_mcatypes); i++) {
		s_hwid = &smca_hwid_mcatypes[i];
		if (hwid_mcatype == s_hwid->hwid_mcatype) {
			smca_banks[bank].hwid = s_hwid;
			smca_banks[bank].id = low;
			smca_banks[bank].sysfs_id = s_hwid->count++;
			break;
		}
	}
}

struct thresh_restart {
	struct threshold_block	*b;
	int			reset;
	int			set_lvt_off;
	int			lvt_off;
	u16			old_limit;
};

static inline bool is_shared_bank(int bank)
{
	/*
	 * Scalable MCA provides for only one core to have access to the MSRs of
	 * a shared bank.
	 */
	if (mce_flags.smca)
		return false;

	/* Bank 4 is for northbridge reporting and is thus shared */
	return (bank == 4);
}

static const char *bank4_names(const struct threshold_block *b)
{
	switch (b->address) {
	/* MSR4_MISC0 */
	case 0x00000413:
		return "dram";

	case 0xc0000408:
		return "ht_links";

	case 0xc0000409:
		return "l3_cache";

	default:
		WARN(1, "Funny MSR: 0x%08x\n", b->address);
		return "";
	}
};


static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
{
	/*
	 * bank 4 supports APIC LVT interrupts implicitly since forever.
	 */
	if (bank == 4)
		return true;

	/*
	 * IntP: interrupt present; if this bit is set, the thresholding
	 * bank can generate APIC LVT interrupts
	 */
	return msr_high_bits & BIT(28);
}

static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
	int msr = (hi & MASK_LVTOFF_HI) >> 20;

	if (apic < 0) {
		pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
		       b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	if (apic != msr) {
		/*
		 * On SMCA CPUs, LVT offset is programmed at a different MSR, and
		 * the BIOS provides the value. The original field where LVT offset
		 * was set is reserved. Return early here:
		 */
		if (mce_flags.smca)
			return 0;

		pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	return 1;
};

/* Reprogram MCx_MISC MSR behind this threshold bank. */
static void threshold_restart_bank(void *_tr)
{
	struct thresh_restart *tr = _tr;
	u32 hi, lo;

	rdmsr(tr->b->address, lo, hi);

	if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
		tr->reset = 1;	/* limit cannot be lower than err count */

	if (tr->reset) {	/* reset err count and overflow bit */
		hi =
		    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
		    (THRESHOLD_MAX - tr->b->threshold_limit);
	} else if (tr->old_limit) {	/* change limit w/o reset */
		int new_count = (hi & THRESHOLD_MAX) +
		    (tr->old_limit - tr->b->threshold_limit);

		hi = (hi & ~MASK_ERR_COUNT_HI) |
		    (new_count & THRESHOLD_MAX);
	}

	/* clear IntType */
	hi &= ~MASK_INT_TYPE_HI;

	if (!tr->b->interrupt_capable)
		goto done;

	if (tr->set_lvt_off) {
		if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
			/* set new lvt offset */
			hi &= ~MASK_LVTOFF_HI;
			hi |= tr->lvt_off << 20;
		}
	}

	if (tr->b->interrupt_enable)
		hi |= INT_TYPE_APIC;

done:

	hi |= MASK_COUNT_EN_HI;
	wrmsr(tr->b->address, lo, hi);
}

static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
	struct thresh_restart tr = {
		.b		= b,
		.set_lvt_off	= 1,
		.lvt_off	= offset,
	};

	b->threshold_limit	= THRESHOLD_MAX;
	threshold_restart_bank(&tr);
};

static int setup_APIC_mce_threshold(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}

static int setup_APIC_deferred_error(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, DEFERRED_ERROR_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}

static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0;
	int def_offset = -1, def_new;

	if (rdmsr_safe(MSR_CU_DEF_ERR, &low, &high))
		return;

	def_new = (low & MASK_DEF_LVTOFF) >> 4;
	if (!(low & MASK_DEF_LVTOFF)) {
		pr_err(FW_BUG "Your BIOS is not setting up LVT offset 0x2 for deferred error IRQs correctly.\n");
		def_new = DEF_LVT_OFF;
		low = (low & ~MASK_DEF_LVTOFF) | (DEF_LVT_OFF << 4);
	}

	def_offset = setup_APIC_deferred_error(def_offset, def_new);
	if ((def_offset == def_new) &&
	    (deferred_error_int_vector != amd_deferred_error_interrupt))
		deferred_error_int_vector = amd_deferred_error_interrupt;

	if (!mce_flags.smca)
		low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;

	wrmsr(MSR_CU_DEF_ERR, low, high);
}

static u32 smca_get_block_address(unsigned int bank, unsigned int block)
{
	u32 low, high;
	u32 addr = 0;

	if (smca_get_bank_type(bank) == SMCA_RESERVED)
		return addr;

	if (!block)
		return MSR_AMD64_SMCA_MCx_MISC(bank);

	/* Check our cache first: */
	if (smca_bank_addrs[bank][block] != -1)
		return smca_bank_addrs[bank][block];

	/*
	 * For SMCA enabled processors, BLKPTR field of the first MISC register
	 * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4).
	 */
	if (rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
		goto out;

	if (!(low & MCI_CONFIG_MCAX))
		goto out;

	if (!rdmsr_safe(MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
	    (low & MASK_BLKPTR_LO))
		addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);

out:
	smca_bank_addrs[bank][block] = addr;
	return addr;
}

static u32 get_block_address(u32 current_addr, u32 low, u32 high,
			     unsigned int bank, unsigned int block)
{
	u32 addr = 0, offset = 0;

	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
		return addr;

	if (mce_flags.smca)
		return smca_get_block_address(bank, block);

	/* Fall back to method we used for older processors: */
	switch (block) {
	case 0:
		addr = msr_ops.misc(bank);
		break;
	case 1:
		offset = ((low & MASK_BLKPTR_LO) >> 21);
		if (offset)
			addr = MCG_XBLK_ADDR + offset;
		break;
	default:
		addr = ++current_addr;
	}
	return addr;
}

static int
prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
			int offset, u32 misc_high)
{
	unsigned int cpu = smp_processor_id();
	u32 smca_low, smca_high;
	struct threshold_block b;
	int new;

	if (!block)
		per_cpu(bank_map, cpu) |= (1 << bank);

	memset(&b, 0, sizeof(b));
	b.cpu			= cpu;
	b.bank			= bank;
	b.block			= block;
	b.address		= addr;
	b.interrupt_capable	= lvt_interrupt_supported(bank, misc_high);

	if (!b.interrupt_capable)
		goto done;

	b.interrupt_enable = 1;

	if (!mce_flags.smca) {
		new = (misc_high & MASK_LVTOFF_HI) >> 20;
		goto set_offset;
	}

	/* Gather LVT offset for thresholding: */
	if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
		goto out;

	new = (smca_low & SMCA_THR_LVT_OFF) >> 12;

set_offset:
	offset = setup_APIC_mce_threshold(offset, new);
	if (offset == new)
		thresholding_irq_en = true;

done:
	mce_threshold_block_init(&b, offset);

out:
	return offset;
}

bool amd_filter_mce(struct mce *m)
{
	enum smca_bank_types bank_type = smca_get_bank_type(m->bank);
	struct cpuinfo_x86 *c = &boot_cpu_data;
	u8 xec = (m->status >> 16) & 0x3F;

	/* See Family 17h Models 10h-2Fh Erratum #1114. */
	if (c->x86 == 0x17 &&
	    c->x86_model >= 0x10 && c->x86_model <= 0x2F &&
	    bank_type == SMCA_IF && xec == 10)
		return true;

	return false;
}

/*
 * Turn off thresholding banks for the following conditions:
 * - MC4_MISC thresholding is not supported on Family 0x15.
 * - Prevent possible spurious interrupts from the IF bank on Family 0x17
 *   Models 0x10-0x2F due to Erratum #1114.
 */
void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank)
{
	int i, num_msrs;
	u64 hwcr;
	bool need_toggle;
	u32 msrs[NR_BLOCKS];

	if (c->x86 == 0x15 && bank == 4) {
		msrs[0] = 0x00000413; /* MC4_MISC0 */
		msrs[1] = 0xc0000408; /* MC4_MISC1 */
		num_msrs = 2;
	} else if (c->x86 == 0x17 &&
		   (c->x86_model >= 0x10 && c->x86_model <= 0x2F)) {

		if (smca_get_bank_type(bank) != SMCA_IF)
			return;

		msrs[0] = MSR_AMD64_SMCA_MCx_MISC(bank);
		num_msrs = 1;
	} else {
		return;
	}

	rdmsrl(MSR_K7_HWCR, hwcr);

	/* McStatusWrEn has to be set */
	need_toggle = !(hwcr & BIT(18));
	if (need_toggle)
		wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));

	/* Clear CntP bit safely */
	for (i = 0; i < num_msrs; i++)
		msr_clear_bit(msrs[i], 62);

	/* restore old settings */
	if (need_toggle)
		wrmsrl(MSR_K7_HWCR, hwcr);
}

/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block, cpu = smp_processor_id();
	int offset = -1;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (mce_flags.smca)
			smca_configure(bank, cpu);

		disable_err_thresholding(c, bank);

		for (block = 0; block < NR_BLOCKS; ++block) {
			address = get_block_address(address, low, high, bank, block);
			if (!address)
				break;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI))
				continue;

			if (!(high & MASK_CNTP_HI) ||
			     (high & MASK_LOCKED_HI))
				continue;

			offset = prepare_threshold_block(bank, block, address, offset, high);
		}
	}

	if (mce_flags.succor)
		deferred_error_interrupt_enable(c);
}

int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
{
	u64 dram_base_addr, dram_limit_addr, dram_hole_base;
	/* We start from the normalized address */
	u64 ret_addr = norm_addr;

	u32 tmp;

	u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
	u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
	u8 intlv_addr_sel, intlv_addr_bit;
	u8 num_intlv_bits, hashed_bit;
	u8 lgcy_mmio_hole_en, base = 0;
	u8 cs_mask, cs_id = 0;
	bool hash_enabled = false;

	/* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
	if (amd_df_indirect_read(nid, 0, 0x1B4, umc, &tmp))
		goto out_err;

	/* Remove HiAddrOffset from normalized address, if enabled: */
	if (tmp & BIT(0)) {
		u64 hi_addr_offset = (tmp & GENMASK_ULL(31, 20)) << 8;

		if (norm_addr >= hi_addr_offset) {
			ret_addr -= hi_addr_offset;
			base = 1;
		}
	}

	/* Read D18F0x110 (DramBaseAddress). */
	if (amd_df_indirect_read(nid, 0, 0x110 + (8 * base), umc, &tmp))
		goto out_err;

	/* Check if address range is valid. */
	if (!(tmp & BIT(0))) {
		pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
			__func__, tmp);
		goto out_err;
	}

	lgcy_mmio_hole_en = tmp & BIT(1);
	intlv_num_chan	  = (tmp >> 4) & 0xF;
	intlv_addr_sel	  = (tmp >> 8) & 0x7;
	dram_base_addr	  = (tmp & GENMASK_ULL(31, 12)) << 16;

	/* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
	if (intlv_addr_sel > 3) {
		pr_err("%s: Invalid interleave address select %d.\n",
			__func__, intlv_addr_sel);
		goto out_err;
	}

	/* Read D18F0x114 (DramLimitAddress). */
	if (amd_df_indirect_read(nid, 0, 0x114 + (8 * base), umc, &tmp))
		goto out_err;

	intlv_num_sockets = (tmp >> 8) & 0x1;
	intlv_num_dies	  = (tmp >> 10) & 0x3;
	dram_limit_addr	  = ((tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);

	intlv_addr_bit = intlv_addr_sel + 8;

	/* Re-use intlv_num_chan by setting it equal to log2(#channels) */
	switch (intlv_num_chan) {
	case 0:	intlv_num_chan = 0; break;
	case 1: intlv_num_chan = 1; break;
	case 3: intlv_num_chan = 2; break;
	case 5:	intlv_num_chan = 3; break;
	case 7:	intlv_num_chan = 4; break;

	case 8: intlv_num_chan = 1;
		hash_enabled = true;
		break;
	default:
		pr_err("%s: Invalid number of interleaved channels %d.\n",
			__func__, intlv_num_chan);
		goto out_err;
	}

	num_intlv_bits = intlv_num_chan;

	if (intlv_num_dies > 2) {
		pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
			__func__, intlv_num_dies);
		goto out_err;
	}

	num_intlv_bits += intlv_num_dies;

	/* Add a bit if sockets are interleaved. */
	num_intlv_bits += intlv_num_sockets;

	/* Assert num_intlv_bits <= 4 */
	if (num_intlv_bits > 4) {
		pr_err("%s: Invalid interleave bits %d.\n",
			__func__, num_intlv_bits);
		goto out_err;
	}

	if (num_intlv_bits > 0) {
		u64 temp_addr_x, temp_addr_i, temp_addr_y;
		u8 die_id_bit, sock_id_bit, cs_fabric_id;

		/*
		 * Read FabricBlockInstanceInformation3_CS[BlockFabricID].
		 * This is the fabric id for this coherent slave. Use
		 * umc/channel# as instance id of the coherent slave
		 * for FICAA.
		 */
		if (amd_df_indirect_read(nid, 0, 0x50, umc, &tmp))
			goto out_err;

		cs_fabric_id = (tmp >> 8) & 0xFF;
		die_id_bit   = 0;

		/* If interleaved over more than 1 channel: */
		if (intlv_num_chan) {
			die_id_bit = intlv_num_chan;
			cs_mask	   = (1 << die_id_bit) - 1;
			cs_id	   = cs_fabric_id & cs_mask;
		}

		sock_id_bit = die_id_bit;

		/* Read D18F1x208 (SystemFabricIdMask). */
		if (intlv_num_dies || intlv_num_sockets)
			if (amd_df_indirect_read(nid, 1, 0x208, umc, &tmp))
				goto out_err;

		/* If interleaved over more than 1 die. */
		if (intlv_num_dies) {
			sock_id_bit  = die_id_bit + intlv_num_dies;
			die_id_shift = (tmp >> 24) & 0xF;
			die_id_mask  = (tmp >> 8) & 0xFF;

			cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
		}

		/* If interleaved over more than 1 socket. */
		if (intlv_num_sockets) {
			socket_id_shift	= (tmp >> 28) & 0xF;
			socket_id_mask	= (tmp >> 16) & 0xFF;

			cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
		}

		/*
		 * The pre-interleaved address consists of XXXXXXIIIYYYYY
		 * where III is the ID for this CS, and XXXXXXYYYYY are the
		 * address bits from the post-interleaved address.
		 * "num_intlv_bits" has been calculated to tell us how many "I"
		 * bits there are. "intlv_addr_bit" tells us how many "Y" bits
		 * there are (where "I" starts).
		 */
		temp_addr_y = ret_addr & GENMASK_ULL(intlv_addr_bit-1, 0);
		temp_addr_i = (cs_id << intlv_addr_bit);
		temp_addr_x = (ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
		ret_addr    = temp_addr_x | temp_addr_i | temp_addr_y;
	}

	/* Add dram base address */
	ret_addr += dram_base_addr;

	/* If legacy MMIO hole enabled */
	if (lgcy_mmio_hole_en) {
		if (amd_df_indirect_read(nid, 0, 0x104, umc, &tmp))
			goto out_err;

		dram_hole_base = tmp & GENMASK(31, 24);
		if (ret_addr >= dram_hole_base)
			ret_addr += (BIT_ULL(32) - dram_hole_base);
	}

	if (hash_enabled) {
		/* Save some parentheses and grab ls-bit at the end. */
		hashed_bit =	(ret_addr >> 12) ^
				(ret_addr >> 18) ^
				(ret_addr >> 21) ^
				(ret_addr >> 30) ^
				cs_id;

		hashed_bit &= BIT(0);

		if (hashed_bit != ((ret_addr >> intlv_addr_bit) & BIT(0)))
			ret_addr ^= BIT(intlv_addr_bit);
	}

	/* Is the calculated system address above the DRAM limit address? */
	if (ret_addr > dram_limit_addr)
		goto out_err;

	*sys_addr = ret_addr;
	return 0;

out_err:
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(umc_normaddr_to_sysaddr);

bool amd_mce_is_memory_error(struct mce *m)
{
	/* ErrCodeExt[20:16] */
	u8 xec = (m->status >> 16) & 0x1f;

	if (mce_flags.smca)
		return smca_get_bank_type(m->bank) == SMCA_UMC && xec == 0x0;

	return m->bank == 4 && xec == 0x8;
}

static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
{
	struct mce m;

	mce_setup(&m);

	m.status = status;
	m.misc   = misc;
	m.bank   = bank;
	m.tsc	 = rdtsc();

	if (m.status & MCI_STATUS_ADDRV) {
		m.addr = addr;

		/*
		 * Extract [55:<lsb>] where lsb is the least significant
		 * *valid* bit of the address bits.
		 */
		if (mce_flags.smca) {
			u8 lsb = (m.addr >> 56) & 0x3f;

			m.addr &= GENMASK_ULL(55, lsb);
		}
	}

	if (mce_flags.smca) {
		rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), m.ipid);

		if (m.status & MCI_STATUS_SYNDV)
			rdmsrl(MSR_AMD64_SMCA_MCx_SYND(bank), m.synd);
	}

	mce_log(&m);
}

asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(struct pt_regs *regs)
{
	entering_irq();
	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
	inc_irq_stat(irq_deferred_error_count);
	deferred_error_int_vector();
	trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
	exiting_ack_irq();
}

/*
 * Returns true if the logged error is deferred, false otherwise.
 */
static inline bool
_log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc)
{
	u64 status, addr = 0;

	rdmsrl(msr_stat, status);
	if (!(status & MCI_STATUS_VAL))
		return false;

	if (status & MCI_STATUS_ADDRV)
		rdmsrl(msr_addr, addr);

	__log_error(bank, status, addr, misc);

	wrmsrl(msr_stat, 0);

	return status & MCI_STATUS_DEFERRED;
}

/*
 * We have three scenarios for checking for Deferred errors:
 *
 * 1) Non-SMCA systems check MCA_STATUS and log error if found.
 * 2) SMCA systems check MCA_STATUS. If error is found then log it and also
 *    clear MCA_DESTAT.
 * 3) SMCA systems check MCA_DESTAT, if error was not found in MCA_STATUS, and
 *    log it.
 */
static void log_error_deferred(unsigned int bank)
{
	bool defrd;

	defrd = _log_error_bank(bank, msr_ops.status(bank),
					msr_ops.addr(bank), 0);

	if (!mce_flags.smca)
		return;

	/* Clear MCA_DESTAT if we logged the deferred error from MCA_STATUS. */
	if (defrd) {
		wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0);
		return;
	}

	/*
	 * Only deferred errors are logged in MCA_DE{STAT,ADDR} so just check
	 * for a valid error.
	 */
	_log_error_bank(bank, MSR_AMD64_SMCA_MCx_DESTAT(bank),
			      MSR_AMD64_SMCA_MCx_DEADDR(bank), 0);
}

/* APIC interrupt handler for deferred errors */
static void amd_deferred_error_interrupt(void)
{
	unsigned int bank;

	for (bank = 0; bank < mca_cfg.banks; ++bank)
		log_error_deferred(bank);
}

static void log_error_thresholding(unsigned int bank, u64 misc)
{
	_log_error_bank(bank, msr_ops.status(bank), msr_ops.addr(bank), misc);
}

static void log_and_reset_block(struct threshold_block *block)
{
	struct thresh_restart tr;
	u32 low = 0, high = 0;

	if (!block)
		return;

	if (rdmsr_safe(block->address, &low, &high))
		return;

	if (!(high & MASK_OVERFLOW_HI))
		return;

	/* Log the MCE which caused the threshold event. */
	log_error_thresholding(block->bank, ((u64)high << 32) | low);

	/* Reset threshold block after logging error. */
	memset(&tr, 0, sizeof(tr));
	tr.b = block;
	threshold_restart_bank(&tr);
}

/*
 * Threshold interrupt handler will service THRESHOLD_APIC_VECTOR. The interrupt
 * goes off when error_count reaches threshold_limit.
 */
static void amd_threshold_interrupt(void)
{
	struct threshold_block *first_block = NULL, *block = NULL, *tmp = NULL;
	unsigned int bank, cpu = smp_processor_id();

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;

		first_block = per_cpu(threshold_banks, cpu)[bank]->blocks;
		if (!first_block)
			continue;

		/*
		 * The first block is also the head of the list. Check it first
		 * before iterating over the rest.
		 */
		log_and_reset_block(first_block);
		list_for_each_entry_safe(block, tmp, &first_block->miscj, miscj)
			log_and_reset_block(block);
	}
}

/*
 * Sysfs Interface
 */

struct threshold_attr {
	struct attribute attr;
	ssize_t (*show) (struct threshold_block *, char *);
	ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

#define SHOW_FIELDS(name)						\
static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long) b->name);		\
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (!b->interrupt_capable)
		return -EINVAL;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	b->interrupt_enable = !!new;

	memset(&tr, 0, sizeof(tr));
	tr.b		= b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}

static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	if (new > THRESHOLD_MAX)
		new = THRESHOLD_MAX;
	if (new < 1)
		new = 1;

	memset(&tr, 0, sizeof(tr));
	tr.old_limit = b->threshold_limit;
	b->threshold_limit = new;
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}

static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
	u32 lo, hi;

	rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);

	return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
				     (THRESHOLD_MAX - b->threshold_limit)));
}

static struct threshold_attr error_count = {
	.attr = {.name = __stringify(error_count), .mode = 0444 },
	.show = show_error_count,
};

#define RW_ATTR(val)							\
static struct threshold_attr val = {					\
	.attr	= {.name = __stringify(val), .mode = 0644 },		\
	.show	= show_## val,						\
	.store	= store_## val,						\
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);

static struct attribute *default_attrs[] = {
	&threshold_limit.attr,
	&error_count.attr,
	NULL,	/* possibly interrupt_enable if supported, see below */
	NULL,
};

#define to_block(k)	container_of(k, struct threshold_block, kobj)
#define to_attr(a)	container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->show ? a->show(b, buf) : -EIO;

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->store ? a->store(b, buf, count) : -EIO;

	return ret;
}

static const struct sysfs_ops threshold_ops = {
	.show			= show,
	.store			= store,
};

static struct kobj_type threshold_ktype = {
	.sysfs_ops		= &threshold_ops,
	.default_attrs		= default_attrs,
};

static const char *get_name(unsigned int bank, struct threshold_block *b)
{
	enum smca_bank_types bank_type;

	if (!mce_flags.smca) {
		if (b && bank == 4)
			return bank4_names(b);

		return th_names[bank];
	}

	bank_type = smca_get_bank_type(bank);
	if (bank_type >= N_SMCA_BANK_TYPES)
		return NULL;

	if (b && bank_type == SMCA_UMC) {
		if (b->block < ARRAY_SIZE(smca_umc_block_names))
			return smca_umc_block_names[b->block];
		return NULL;
	}

	if (smca_banks[bank].hwid->count == 1)
		return smca_get_name(bank_type);

	snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN,
		 "%s_%x", smca_get_name(bank_type),
			  smca_banks[bank].sysfs_id);
	return buf_mcatype;
}

static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
				     unsigned int block, u32 address)
{
	struct threshold_block *b = NULL;
	u32 low, high;
	int err;

	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
		return 0;

	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
		return 0;

	if (!(high & MASK_VALID_HI)) {
		if (block)
			goto recurse;
		else
			return 0;
	}

	if (!(high & MASK_CNTP_HI) ||
	     (high & MASK_LOCKED_HI))
		goto recurse;

	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	b->block		= block;
	b->bank			= bank;
	b->cpu			= cpu;
	b->address		= address;
	b->interrupt_enable	= 0;
	b->interrupt_capable	= lvt_interrupt_supported(bank, high);
	b->threshold_limit	= THRESHOLD_MAX;

	if (b->interrupt_capable) {
		threshold_ktype.default_attrs[2] = &interrupt_enable.attr;
		b->interrupt_enable = 1;
	} else {
		threshold_ktype.default_attrs[2] = NULL;
	}

	INIT_LIST_HEAD(&b->miscj);

	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
		list_add(&b->miscj,
			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
	} else {
		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
	}

	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
				   per_cpu(threshold_banks, cpu)[bank]->kobj,
				   get_name(bank, b));
	if (err)
		goto out_free;
recurse:
	address = get_block_address(address, low, high, bank, ++block);
	if (!address)
		return 0;

	err = allocate_threshold_blocks(cpu, bank, block, address);
	if (err)
		goto out_free;

	if (b)
		kobject_uevent(&b->kobj, KOBJ_ADD);

	return err;

out_free:
	if (b) {
		kobject_put(&b->kobj);
		list_del(&b->miscj);
		kfree(b);
	}
	return err;
}

static int __threshold_add_blocks(struct threshold_bank *b)
{
	struct list_head *head = &b->blocks->miscj;
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	int err = 0;

	err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);
	if (err)
		return err;

	list_for_each_entry_safe(pos, tmp, head, miscj) {

		err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name);
		if (err) {
			list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
				kobject_del(&pos->kobj);

			return err;
		}
	}
	return err;
}

static int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
	struct device *dev = per_cpu(mce_device, cpu);
	struct amd_northbridge *nb = NULL;
	struct threshold_bank *b = NULL;
	const char *name = get_name(bank, NULL);
	int err = 0;

	if (!dev)
		return -ENODEV;

	if (is_shared_bank(bank)) {
		nb = node_to_amd_nb(amd_get_nb_id(cpu));

		/* threshold descriptor already initialized on this node? */
		if (nb && nb->bank4) {
			/* yes, use it */
			b = nb->bank4;
			err = kobject_add(b->kobj, &dev->kobj, name);
			if (err)
				goto out;

			per_cpu(threshold_banks, cpu)[bank] = b;
			refcount_inc(&b->cpus);

			err = __threshold_add_blocks(b);

			goto out;
		}
	}

	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto out;
	}

	b->kobj = kobject_create_and_add(name, &dev->kobj);
	if (!b->kobj) {
		err = -EINVAL;
		goto out_free;
	}

	per_cpu(threshold_banks, cpu)[bank] = b;

	if (is_shared_bank(bank)) {
		refcount_set(&b->cpus, 1);

		/* nb is already initialized, see above */
		if (nb) {
			WARN_ON(nb->bank4);
			nb->bank4 = b;
		}
	}

	err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
	if (!err)
		goto out;

out_free:
	kfree(b);

out:
	return err;
}

static void deallocate_threshold_block(unsigned int cpu,
						 unsigned int bank)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

	if (!head)
		return;

	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
		kobject_put(&pos->kobj);
		list_del(&pos->miscj);
		kfree(pos);
	}

	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}

static void __threshold_remove_blocks(struct threshold_bank *b)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;

	kobject_del(b->kobj);

	list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
		kobject_del(&pos->kobj);
}

static void threshold_remove_bank(unsigned int cpu, int bank)
{
	struct amd_northbridge *nb;
	struct threshold_bank *b;

	b = per_cpu(threshold_banks, cpu)[bank];
	if (!b)
		return;

	if (!b->blocks)
		goto free_out;

	if (is_shared_bank(bank)) {
		if (!refcount_dec_and_test(&b->cpus)) {
			__threshold_remove_blocks(b);
			per_cpu(threshold_banks, cpu)[bank] = NULL;
			return;
		} else {
			/*
			 * the last CPU on this node using the shared bank is
			 * going away, remove that bank now.
			 */
			nb = node_to_amd_nb(amd_get_nb_id(cpu));
			nb->bank4 = NULL;
		}
	}

	deallocate_threshold_block(cpu, bank);

free_out:
	kobject_del(b->kobj);
	kobject_put(b->kobj);
	kfree(b);
	per_cpu(threshold_banks, cpu)[bank] = NULL;
}

int mce_threshold_remove_device(unsigned int cpu)
{
	unsigned int bank;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		threshold_remove_bank(cpu, bank);
	}
	kfree(per_cpu(threshold_banks, cpu));
	per_cpu(threshold_banks, cpu) = NULL;
	return 0;
}

/* create dir/files for all valid threshold banks */
int mce_threshold_create_device(unsigned int cpu)
{
	unsigned int bank;
	struct threshold_bank **bp;
	int err = 0;

	bp = per_cpu(threshold_banks, cpu);
	if (bp)
		return 0;

	bp = kcalloc(mca_cfg.banks, sizeof(struct threshold_bank *),
		     GFP_KERNEL);
	if (!bp)
		return -ENOMEM;

	per_cpu(threshold_banks, cpu) = bp;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		err = threshold_create_bank(cpu, bank);
		if (err)
			goto err;
	}
	return err;
err:
	mce_threshold_remove_device(cpu);
	return err;
}

static __init int threshold_init_device(void)
{
	unsigned lcpu = 0;

	/* to hit CPUs online before the notifier is up */
	for_each_online_cpu(lcpu) {
		int err = mce_threshold_create_device(lcpu);

		if (err)
			return err;
	}

	if (thresholding_irq_en)
		mce_threshold_vector = amd_threshold_interrupt;

	return 0;
}
/*
 * There are three functions which need to be _initcall'd in a logical
 * sequence:
 * 1. xen_late_init_mcelog
 * 2. mcheck_init_device
 * 3. threshold_init_device
 *
 * xen_late_init_mcelog must register xen_mce_chrdev_device before
 * native mce_chrdev_device registration if running under the xen platform;
 *
 * mcheck_init_device should be inited before threshold_init_device to
 * initialize mce_device, otherwise a NULL ptr dereference will cause panic.
 *
 * So we use the following _initcalls:
 * 1. device_initcall(xen_late_init_mcelog);
 * 2. device_initcall_sync(mcheck_init_device);
 * 3. late_initcall(threshold_init_device);
 *
 * When running under xen, the initcall order is 1, 2, 3;
 * on baremetal, we skip 1 and do only 2 and 3.
 */
late_initcall(threshold_init_device);