// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Intel client SoC with integrated memory controller using IBECC
 *
 * Copyright (C) 2020 Intel Corporation
 *
 * The In-Band ECC (IBECC) IP provides ECC protection to all or specific
 * regions of the physical memory space. It's used for memory controllers
 * that don't support out-of-band ECC, which often requires an additional
 * storage device on each channel for storing the ECC data.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/genalloc.h>
#include <linux/edac.h>
#include <linux/bits.h>
#include <linux/io.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#include <asm/mce.h>

#include "edac_mc.h"
#include "edac_module.h"

#define IGEN6_REVISION	"v2.5.1"

#define EDAC_MOD_STR	"igen6_edac"
#define IGEN6_NMI_NAME	"igen6_ibecc"

/* Debug macros */
#define igen6_printk(level, fmt, arg...)		\
	edac_printk(level, "igen6", fmt, ##arg)

#define igen6_mc_printk(mci, level, fmt, arg...)	\
	edac_mc_chipset_printk(mci, level, "igen6", fmt, ##arg)

#define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))

#define NUM_IMC			2 /* Max memory controllers */
#define NUM_CHANNELS		2 /* Max channels */
#define NUM_DIMMS		2 /* Max DIMMs per channel */

#define _4GB			BIT_ULL(32)

/* Size of physical memory */
#define TOM_OFFSET		0xa0
/* Top of low usable DRAM */
#define TOLUD_OFFSET		0xbc
/* Capability register C */
#define CAPID_C_OFFSET		0xec
#define CAPID_C_IBECC		BIT(15)

/* Capability register E */
#define CAPID_E_OFFSET		0xf0
#define CAPID_E_IBECC		BIT(12)
#define CAPID_E_IBECC_BIT18	BIT(18)

/* Error Status */
#define ERRSTS_OFFSET		0xc8
#define ERRSTS_CE		BIT_ULL(6)
#define ERRSTS_UE		BIT_ULL(7)

/* Error Command */
#define ERRCMD_OFFSET		0xca
#define ERRCMD_CE		BIT_ULL(6)
#define ERRCMD_UE		BIT_ULL(7)

/* IBECC MMIO base address */
#define IBECC_BASE		(res_cfg->ibecc_base)
#define IBECC_ACTIVATE_OFFSET	IBECC_BASE
#define IBECC_ACTIVATE_EN	BIT(0)

/* IBECC error log */
#define ECC_ERROR_LOG_OFFSET		(IBECC_BASE + res_cfg->ibecc_error_log_offset)
#define ECC_ERROR_LOG_CE		BIT_ULL(62)
#define ECC_ERROR_LOG_UE		BIT_ULL(63)
#define ECC_ERROR_LOG_ADDR_SHIFT	5
#define ECC_ERROR_LOG_ADDR(v)		GET_BITFIELD(v, 5, 38)
#define ECC_ERROR_LOG_ADDR45(v)		GET_BITFIELD(v, 5, 45)
#define ECC_ERROR_LOG_SYND(v)		GET_BITFIELD(v, 46, 61)

/* Host MMIO base address */
#define MCHBAR_OFFSET		0x48
#define MCHBAR_EN		BIT_ULL(0)
#define MCHBAR_BASE(v)		(GET_BITFIELD(v, 16, 38) << 16)
#define MCHBAR_SIZE		0x10000

/* Parameters for the channel decode stage */
#define IMC_BASE			(res_cfg->imc_base)
#define MAD_INTER_CHANNEL_OFFSET	IMC_BASE
#define MAD_INTER_CHANNEL_DDR_TYPE(v)	GET_BITFIELD(v, 0, 2)
#define MAD_INTER_CHANNEL_ECHM(v)	GET_BITFIELD(v, 3, 3)
#define MAD_INTER_CHANNEL_CH_L_MAP(v)	GET_BITFIELD(v, 4, 4)
#define MAD_INTER_CHANNEL_CH_S_SIZE(v)	((u64)GET_BITFIELD(v, 12, 19) << 29)

/* Parameters for DRAM decode stage */
#define MAD_INTRA_CH0_OFFSET		(IMC_BASE + 4)
#define MAD_INTRA_CH_DIMM_L_MAP(v)	GET_BITFIELD(v, 0, 0)
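
/*
 * The channel and DIMM size fields (MAD_INTER_CHANNEL_CH_S_SIZE above,
 * MAD_DIMM_CH_DIMM_{L,S}_SIZE below) are encoded in 512 MiB (1 << 29
 * bytes) granules, hence the "<< 29" in the decode macros. For example,
 * a raw field value of 16 decodes to 8 GiB.
 */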

/* DIMM characteristics */
#define MAD_DIMM_CH0_OFFSET		(IMC_BASE + 0xc)
#define MAD_DIMM_CH_DIMM_L_SIZE(v)	((u64)GET_BITFIELD(v, 0, 6) << 29)
#define MAD_DIMM_CH_DLW(v)		GET_BITFIELD(v, 7, 8)
#define MAD_DIMM_CH_DIMM_S_SIZE(v)	((u64)GET_BITFIELD(v, 16, 22) << 29)
#define MAD_DIMM_CH_DSW(v)		GET_BITFIELD(v, 24, 25)

/* Hash for memory controller selection */
#define MAD_MC_HASH_OFFSET		(IMC_BASE + 0x1b8)
#define MAC_MC_HASH_LSB(v)		GET_BITFIELD(v, 1, 3)

/* Hash for channel selection */
#define CHANNEL_HASH_OFFSET		(IMC_BASE + 0x24)
/* Hash for enhanced channel selection */
#define CHANNEL_EHASH_OFFSET		(IMC_BASE + 0x28)
#define CHANNEL_HASH_MASK(v)		(GET_BITFIELD(v, 6, 19) << 6)
#define CHANNEL_HASH_LSB_MASK_BIT(v)	GET_BITFIELD(v, 24, 26)
#define CHANNEL_HASH_MODE(v)		GET_BITFIELD(v, 28, 28)

/* Parameters for memory slice decode stage */
#define MEM_SLICE_HASH_MASK(v)		(GET_BITFIELD(v, 6, 19) << 6)
#define MEM_SLICE_HASH_LSB_MASK_BIT(v)	GET_BITFIELD(v, 24, 26)

static struct res_config {
	bool machine_check;
	int num_imc;
	u32 imc_base;
	u32 cmf_base;
	u32 cmf_size;
	u32 ms_hash_offset;
	u32 ibecc_base;
	u32 ibecc_error_log_offset;
	bool (*ibecc_available)(struct pci_dev *pdev);
	/* Extract error address logged in IBECC */
	u64 (*err_addr)(u64 ecclog);
	/* Convert error address logged in IBECC to system physical address */
	u64 (*err_addr_to_sys_addr)(u64 eaddr, int mc);
	/* Convert error address logged in IBECC to integrated memory controller address */
	u64 (*err_addr_to_imc_addr)(u64 eaddr, int mc);
} *res_cfg;

struct igen6_imc {
	int mc;
	struct mem_ctl_info *mci;
	struct pci_dev *pdev;
	struct device dev;
	void __iomem *window;
	u64 size;
	u64 ch_s_size;
	int ch_l_map;
	u64 dimm_s_size[NUM_CHANNELS];
	u64 dimm_l_size[NUM_CHANNELS];
	int dimm_l_map[NUM_CHANNELS];
};

static struct igen6_pvt {
	struct igen6_imc imc[NUM_IMC];
	u64 ms_hash;
	u64 ms_s_size;
	int ms_l_map;
} *igen6_pvt;

/* The top of low usable DRAM */
static u32 igen6_tolud;
/* The size of physical memory */
static u64 igen6_tom;

struct decoded_addr {
	int mc;
	u64 imc_addr;
	u64 sys_addr;
	int channel_idx;
	u64 channel_addr;
	int sub_channel_idx;
	u64 sub_channel_addr;
};

struct ecclog_node {
	struct llist_node llnode;
	int mc;
	u64 ecclog;
};

/*
 * In the NMI handler, the driver uses the lock-less memory allocator
 * to allocate memory to store the IBECC error logs and links the logs
 * to the lock-less list. Delay printk() and the work of error reporting
 * to EDAC core in a worker.
 */
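
/*
 * The pool below is a static PAGE_SIZE buffer carved into
 * ecclog_node-sized chunks (see ecclog_gen_pool_create()), so log
 * entries can be allocated with the lock-less allocator from NMI
 * context. If the pool is exhausted, the new log entry is dropped.
 */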
#define ECCLOG_POOL_SIZE	PAGE_SIZE
static LLIST_HEAD(ecclog_llist);
static struct gen_pool *ecclog_pool;
static char ecclog_buf[ECCLOG_POOL_SIZE];
static struct irq_work ecclog_irq_work;
static struct work_struct ecclog_work;

/* Compute die IDs for Elkhart Lake with IBECC */
#define DID_EHL_SKU5	0x4514
#define DID_EHL_SKU6	0x4528
#define DID_EHL_SKU7	0x452a
#define DID_EHL_SKU8	0x4516
#define DID_EHL_SKU9	0x452c
#define DID_EHL_SKU10	0x452e
#define DID_EHL_SKU11	0x4532
#define DID_EHL_SKU12	0x4518
#define DID_EHL_SKU13	0x451a
#define DID_EHL_SKU14	0x4534
#define DID_EHL_SKU15	0x4536

/* Compute die IDs for ICL-NNPI with IBECC */
#define DID_ICL_SKU8	0x4581
#define DID_ICL_SKU10	0x4585
#define DID_ICL_SKU11	0x4589
#define DID_ICL_SKU12	0x458d

/* Compute die IDs for Tiger Lake with IBECC */
#define DID_TGL_SKU	0x9a14

/* Compute die IDs for Alder Lake with IBECC */
#define DID_ADL_SKU1	0x4601
#define DID_ADL_SKU2	0x4602
#define DID_ADL_SKU3	0x4621
#define DID_ADL_SKU4	0x4641

/* Compute die IDs for Alder Lake-N with IBECC */
#define DID_ADL_N_SKU1	0x4614
#define DID_ADL_N_SKU2	0x4617
#define DID_ADL_N_SKU3	0x461b
#define DID_ADL_N_SKU4	0x461c
#define DID_ADL_N_SKU5	0x4673
#define DID_ADL_N_SKU6	0x4674
#define DID_ADL_N_SKU7	0x4675
#define DID_ADL_N_SKU8	0x4677
#define DID_ADL_N_SKU9	0x4678
#define DID_ADL_N_SKU10	0x4679
#define DID_ADL_N_SKU11	0x467c
#define DID_ADL_N_SKU12	0x4632

/* Compute die IDs for Raptor Lake-P with IBECC */
#define DID_RPL_P_SKU1	0xa706
#define DID_RPL_P_SKU2	0xa707
#define DID_RPL_P_SKU3	0xa708
#define DID_RPL_P_SKU4	0xa716
#define DID_RPL_P_SKU5	0xa718

/* Compute die IDs for Meteor Lake-PS with IBECC */
#define DID_MTL_PS_SKU1	0x7d21
#define DID_MTL_PS_SKU2	0x7d22
#define DID_MTL_PS_SKU3	0x7d23
#define DID_MTL_PS_SKU4	0x7d24

/* Compute die IDs for Meteor Lake-P with IBECC */
#define DID_MTL_P_SKU1	0x7d01
#define DID_MTL_P_SKU2	0x7d02
#define DID_MTL_P_SKU3	0x7d14

/* Compute die IDs for Arrow Lake-UH with IBECC */
#define DID_ARL_UH_SKU1	0x7d06
#define DID_ARL_UH_SKU2	0x7d20
#define DID_ARL_UH_SKU3	0x7d30

static int get_mchbar(struct pci_dev *pdev, u64 *mchbar)
{
	union {
		u64 v;
		struct {
			u32 v_lo;
			u32 v_hi;
		};
	} u;

	if (pci_read_config_dword(pdev, MCHBAR_OFFSET, &u.v_lo)) {
		igen6_printk(KERN_ERR, "Failed to read lower MCHBAR\n");
		return -ENODEV;
	}

	if (pci_read_config_dword(pdev, MCHBAR_OFFSET + 4, &u.v_hi)) {
		igen6_printk(KERN_ERR, "Failed to read upper MCHBAR\n");
		return -ENODEV;
	}

	if (!(u.v & MCHBAR_EN)) {
		igen6_printk(KERN_ERR, "MCHBAR is disabled\n");
		return -ENODEV;
	}

	*mchbar = MCHBAR_BASE(u.v);

	return 0;
}

static bool ehl_ibecc_available(struct pci_dev *pdev)
{
	u32 v;

	if (pci_read_config_dword(pdev, CAPID_C_OFFSET, &v))
		return false;

	return !!(CAPID_C_IBECC & v);
}

static u64 ehl_err_addr_to_sys_addr(u64 eaddr, int mc)
{
	return eaddr;
}

static u64 ehl_err_addr_to_imc_addr(u64 eaddr, int mc)
{
	if (eaddr < igen6_tolud)
		return eaddr;

	if (igen6_tom <= _4GB)
		return eaddr + igen6_tolud - _4GB;

	if (eaddr >= igen6_tom)
		return eaddr + igen6_tolud - igen6_tom;
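
	/*
	 * The remaining range [4 GiB, TOM) maps 1:1 onto iMC addresses.
	 * Illustration with hypothetical values TOLUD = 2 GiB and
	 * TOM = 8 GiB: system addresses [0, 2G) and [4G, 8G) are identity
	 * mapped, and [8G, 10G) holds the DRAM reclaimed from behind the
	 * [2G, 4G) MMIO hole, so e.g. system address 9 GiB decodes to
	 * iMC address 9G + 2G - 8G = 3 GiB.
	 */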
	return eaddr;
}

static bool icl_ibecc_available(struct pci_dev *pdev)
{
	u32 v;

	if (pci_read_config_dword(pdev, CAPID_C_OFFSET, &v))
		return false;

	return !(CAPID_C_IBECC & v) &&
		(boot_cpu_data.x86_stepping >= 1);
}

static bool tgl_ibecc_available(struct pci_dev *pdev)
{
	u32 v;

	if (pci_read_config_dword(pdev, CAPID_E_OFFSET, &v))
		return false;

	return !(CAPID_E_IBECC & v);
}

static bool mtl_p_ibecc_available(struct pci_dev *pdev)
{
	u32 v;

	if (pci_read_config_dword(pdev, CAPID_E_OFFSET, &v))
		return false;

	return !(CAPID_E_IBECC_BIT18 & v);
}

static bool mtl_ps_ibecc_available(struct pci_dev *pdev)
{
#define MCHBAR_MEMSS_IBECCDIS	0x13c00
	void __iomem *window;
	u64 mchbar;
	u32 val;

	if (get_mchbar(pdev, &mchbar))
		return false;

	/* The 0x13c00 offset lies above the first 64 KiB window, hence the double-sized mapping */
	window = ioremap(mchbar, MCHBAR_SIZE * 2);
	if (!window) {
		igen6_printk(KERN_ERR, "Failed to ioremap 0x%llx\n", mchbar);
		return false;
	}

	val = readl(window + MCHBAR_MEMSS_IBECCDIS);
	iounmap(window);

	/* Bit6: 1 - IBECC is disabled, 0 - IBECC isn't disabled */
	return !GET_BITFIELD(val, 6, 6);
}

static u64 mem_addr_to_sys_addr(u64 maddr)
{
	if (maddr < igen6_tolud)
		return maddr;

	if (igen6_tom <= _4GB)
		return maddr - igen6_tolud + _4GB;

	if (maddr < _4GB)
		return maddr - igen6_tolud + igen6_tom;

	return maddr;
}

static u64 mem_slice_hash(u64 addr, u64 mask, u64 hash_init, int intlv_bit)
{
	u64 hash_addr = addr & mask, hash = hash_init;
	u64 intlv = (addr >> intlv_bit) & 1;
	int i;

	for (i = 6; i < 20; i++)
		hash ^= (hash_addr >> i) & 1;

	return hash ^ intlv;
}

static u64 tgl_err_addr_to_mem_addr(u64 eaddr, int mc)
{
	u64 maddr, hash, mask, ms_s_size;
	int intlv_bit;
	u32 ms_hash;

	ms_s_size = igen6_pvt->ms_s_size;
	if (eaddr >= ms_s_size)
		return eaddr + ms_s_size;

	ms_hash = igen6_pvt->ms_hash;

	mask = MEM_SLICE_HASH_MASK(ms_hash);
	intlv_bit = MEM_SLICE_HASH_LSB_MASK_BIT(ms_hash) + 6;

	maddr = GET_BITFIELD(eaddr, intlv_bit, 63) << (intlv_bit + 1) |
		GET_BITFIELD(eaddr, 0, intlv_bit - 1);

	hash = mem_slice_hash(maddr, mask, mc, intlv_bit);

	return maddr | (hash << intlv_bit);
}

static u64 tgl_err_addr_to_sys_addr(u64 eaddr, int mc)
{
	u64 maddr = tgl_err_addr_to_mem_addr(eaddr, mc);

	return mem_addr_to_sys_addr(maddr);
}

static u64 tgl_err_addr_to_imc_addr(u64 eaddr, int mc)
{
	return eaddr;
}

static u64 adl_err_addr_to_sys_addr(u64 eaddr, int mc)
{
	return mem_addr_to_sys_addr(eaddr);
}

static u64 adl_err_addr_to_imc_addr(u64 eaddr, int mc)
{
	u64 imc_addr, ms_s_size = igen6_pvt->ms_s_size;
	struct igen6_imc *imc = &igen6_pvt->imc[mc];
	int intlv_bit;
	u32 mc_hash;

	if (eaddr >= 2 * ms_s_size)
		return eaddr - ms_s_size;

	mc_hash = readl(imc->window + MAD_MC_HASH_OFFSET);

	intlv_bit = MAC_MC_HASH_LSB(mc_hash) + 6;

	imc_addr = GET_BITFIELD(eaddr, intlv_bit + 1, 63) << intlv_bit |
		   GET_BITFIELD(eaddr, 0, intlv_bit - 1);

	return imc_addr;
}

static u64 rpl_p_err_addr(u64 ecclog)
{
	return ECC_ERROR_LOG_ADDR45(ecclog);
}
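
/*
 * Per-SoC resource configurations. Each instance supplies the MMIO layout
 * (iMC/IBECC register bases and error log offset), the IBECC capability
 * probe, and the address-translation callbacks used by the decode path.
 */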

static struct res_config ehl_cfg = {
	.num_imc = 1,
	.imc_base = 0x5000,
	.ibecc_base = 0xdc00,
	.ibecc_available = ehl_ibecc_available,
	.ibecc_error_log_offset = 0x170,
	.err_addr_to_sys_addr = ehl_err_addr_to_sys_addr,
	.err_addr_to_imc_addr = ehl_err_addr_to_imc_addr,
};

static struct res_config icl_cfg = {
	.num_imc = 1,
	.imc_base = 0x5000,
	.ibecc_base = 0xd800,
	.ibecc_error_log_offset = 0x170,
	.ibecc_available = icl_ibecc_available,
	.err_addr_to_sys_addr = ehl_err_addr_to_sys_addr,
	.err_addr_to_imc_addr = ehl_err_addr_to_imc_addr,
};

static struct res_config tgl_cfg = {
	.machine_check = true,
	.num_imc = 2,
	.imc_base = 0x5000,
	.cmf_base = 0x11000,
	.cmf_size = 0x800,
	.ms_hash_offset = 0xac,
	.ibecc_base = 0xd400,
	.ibecc_error_log_offset = 0x170,
	.ibecc_available = tgl_ibecc_available,
	.err_addr_to_sys_addr = tgl_err_addr_to_sys_addr,
	.err_addr_to_imc_addr = tgl_err_addr_to_imc_addr,
};

static struct res_config adl_cfg = {
	.machine_check = true,
	.num_imc = 2,
	.imc_base = 0xd800,
	.ibecc_base = 0xd400,
	.ibecc_error_log_offset = 0x68,
	.ibecc_available = tgl_ibecc_available,
	.err_addr_to_sys_addr = adl_err_addr_to_sys_addr,
	.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};

static struct res_config adl_n_cfg = {
	.machine_check = true,
	.num_imc = 1,
	.imc_base = 0xd800,
	.ibecc_base = 0xd400,
	.ibecc_error_log_offset = 0x68,
	.ibecc_available = tgl_ibecc_available,
	.err_addr_to_sys_addr = adl_err_addr_to_sys_addr,
	.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};

static struct res_config rpl_p_cfg = {
	.machine_check = true,
	.num_imc = 2,
	.imc_base = 0xd800,
	.ibecc_base = 0xd400,
	.ibecc_error_log_offset = 0x68,
	.ibecc_available = tgl_ibecc_available,
	.err_addr = rpl_p_err_addr,
	.err_addr_to_sys_addr = adl_err_addr_to_sys_addr,
	.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};

static struct res_config mtl_ps_cfg = {
	.machine_check = true,
	.num_imc = 2,
	.imc_base = 0xd800,
	.ibecc_base = 0xd400,
	.ibecc_error_log_offset = 0x170,
	.ibecc_available = mtl_ps_ibecc_available,
	.err_addr_to_sys_addr = adl_err_addr_to_sys_addr,
	.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};

static struct res_config mtl_p_cfg = {
	.machine_check = true,
	.num_imc = 2,
	.imc_base = 0xd800,
	.ibecc_base = 0xd400,
	.ibecc_error_log_offset = 0x170,
	.ibecc_available = mtl_p_ibecc_available,
	.err_addr_to_sys_addr = adl_err_addr_to_sys_addr,
	.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
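
/*
 * Bind each supported compute-die PCI device ID to its res_config.
 * Note that Arrow Lake-UH reuses the Meteor Lake-P configuration.
 */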

static const struct pci_device_id igen6_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU5), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU6), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU7), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU8), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU9), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU10), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU11), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU12), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU13), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU14), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU15), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ICL_SKU8), (kernel_ulong_t)&icl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ICL_SKU10), (kernel_ulong_t)&icl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ICL_SKU11), (kernel_ulong_t)&icl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ICL_SKU12), (kernel_ulong_t)&icl_cfg },
	{ PCI_VDEVICE(INTEL, DID_TGL_SKU), (kernel_ulong_t)&tgl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_SKU1), (kernel_ulong_t)&adl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_SKU2), (kernel_ulong_t)&adl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_SKU3), (kernel_ulong_t)&adl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_SKU4), (kernel_ulong_t)&adl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU1), (kernel_ulong_t)&adl_n_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU2), (kernel_ulong_t)&adl_n_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU3), (kernel_ulong_t)&adl_n_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU4), (kernel_ulong_t)&adl_n_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU5), (kernel_ulong_t)&adl_n_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU6), (kernel_ulong_t)&adl_n_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU7), (kernel_ulong_t)&adl_n_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU8), (kernel_ulong_t)&adl_n_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU9), (kernel_ulong_t)&adl_n_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU10), (kernel_ulong_t)&adl_n_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU11), (kernel_ulong_t)&adl_n_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU12), (kernel_ulong_t)&adl_n_cfg },
	{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU1), (kernel_ulong_t)&rpl_p_cfg },
	{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU2), (kernel_ulong_t)&rpl_p_cfg },
	{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU3), (kernel_ulong_t)&rpl_p_cfg },
	{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU4), (kernel_ulong_t)&rpl_p_cfg },
	{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU5), (kernel_ulong_t)&rpl_p_cfg },
	{ PCI_VDEVICE(INTEL, DID_MTL_PS_SKU1), (kernel_ulong_t)&mtl_ps_cfg },
	{ PCI_VDEVICE(INTEL, DID_MTL_PS_SKU2), (kernel_ulong_t)&mtl_ps_cfg },
	{ PCI_VDEVICE(INTEL, DID_MTL_PS_SKU3), (kernel_ulong_t)&mtl_ps_cfg },
	{ PCI_VDEVICE(INTEL, DID_MTL_PS_SKU4), (kernel_ulong_t)&mtl_ps_cfg },
	{ PCI_VDEVICE(INTEL, DID_MTL_P_SKU1), (kernel_ulong_t)&mtl_p_cfg },
	{ PCI_VDEVICE(INTEL, DID_MTL_P_SKU2), (kernel_ulong_t)&mtl_p_cfg },
	{ PCI_VDEVICE(INTEL, DID_MTL_P_SKU3), (kernel_ulong_t)&mtl_p_cfg },
	{ PCI_VDEVICE(INTEL, DID_ARL_UH_SKU1), (kernel_ulong_t)&mtl_p_cfg },
	{ PCI_VDEVICE(INTEL, DID_ARL_UH_SKU2), (kernel_ulong_t)&mtl_p_cfg },
	{ PCI_VDEVICE(INTEL, DID_ARL_UH_SKU3), (kernel_ulong_t)&mtl_p_cfg },
	{ },
};
MODULE_DEVICE_TABLE(pci, igen6_pci_tbl);

static enum dev_type get_width(int dimm_l, u32 mad_dimm)
{
	u32 w = dimm_l ? MAD_DIMM_CH_DLW(mad_dimm) :
			 MAD_DIMM_CH_DSW(mad_dimm);

	switch (w) {
	case 0:
		return DEV_X8;
	case 1:
		return DEV_X16;
	case 2:
		return DEV_X32;
	default:
		return DEV_UNKNOWN;
	}
}

static enum mem_type get_memory_type(u32 mad_inter)
{
	u32 t = MAD_INTER_CHANNEL_DDR_TYPE(mad_inter);

	switch (t) {
	case 0:
		return MEM_DDR4;
	case 1:
		return MEM_DDR3;
	case 2:
		return MEM_LPDDR3;
	case 3:
		return MEM_LPDDR4;
	case 4:
		return MEM_WIO2;
	default:
		return MEM_UNKNOWN;
	}
}

static int decode_chan_idx(u64 addr, u64 mask, int intlv_bit)
{
	u64 hash_addr = addr & mask, hash = 0;
	u64 intlv = (addr >> intlv_bit) & 1;
	int i;

	for (i = 6; i < 20; i++)
		hash ^= (hash_addr >> i) & 1;

	return (int)hash ^ intlv;
}

static u64 decode_channel_addr(u64 addr, int intlv_bit)
{
	u64 channel_addr;

	/* Remove the interleave bit and shift upper part down to fill gap */
	channel_addr = GET_BITFIELD(addr, intlv_bit + 1, 63) << intlv_bit;
	channel_addr |= GET_BITFIELD(addr, 0, intlv_bit - 1);

	return channel_addr;
}

static void decode_addr(u64 addr, u32 hash, u64 s_size, int l_map,
			int *idx, u64 *sub_addr)
{
	int intlv_bit = CHANNEL_HASH_LSB_MASK_BIT(hash) + 6;

	if (addr > 2 * s_size) {
		*sub_addr = addr - s_size;
		*idx = l_map;
		return;
	}

	if (CHANNEL_HASH_MODE(hash)) {
		*sub_addr = decode_channel_addr(addr, intlv_bit);
		*idx = decode_chan_idx(addr, CHANNEL_HASH_MASK(hash), intlv_bit);
	} else {
		*sub_addr = decode_channel_addr(addr, 6);
		*idx = GET_BITFIELD(addr, 6, 6);
	}
}

static int igen6_decode(struct decoded_addr *res)
{
	struct igen6_imc *imc = &igen6_pvt->imc[res->mc];
	u64 addr = res->imc_addr, sub_addr, s_size;
	int idx, l_map;
	u32 hash;

	if (addr >= igen6_tom) {
		edac_dbg(0, "Address 0x%llx out of range\n", addr);
		return -EINVAL;
	}

	/* Decode channel */
	hash = readl(imc->window + CHANNEL_HASH_OFFSET);
	s_size = imc->ch_s_size;
	l_map = imc->ch_l_map;
	decode_addr(addr, hash, s_size, l_map, &idx, &sub_addr);
	res->channel_idx = idx;
	res->channel_addr = sub_addr;

	/* Decode sub-channel/DIMM */
	hash = readl(imc->window + CHANNEL_EHASH_OFFSET);
	s_size = imc->dimm_s_size[idx];
	l_map = imc->dimm_l_map[idx];
	decode_addr(res->channel_addr, hash, s_size, l_map, &idx, &sub_addr);
	res->sub_channel_idx = idx;
	res->sub_channel_addr = sub_addr;

	return 0;
}
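
/*
 * Illustrative decode_addr() trace with hypothetical register values:
 * assume a symmetric configuration with CHANNEL_HASH_MODE() = 0 (plain
 * interleave on address bit 6) and take addr = 0x1000040. Bit 6 is set,
 * so the channel index is 1; removing bit 6 and squeezing the upper bits
 * down yields the channel address (0x1000040 >> 7) << 6 = 0x800000.
 */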

static void igen6_output_error(struct decoded_addr *res,
			       struct mem_ctl_info *mci, u64 ecclog)
{
	enum hw_event_mc_err_type type = ecclog & ECC_ERROR_LOG_UE ?
					 HW_EVENT_ERR_UNCORRECTED :
					 HW_EVENT_ERR_CORRECTED;

	edac_mc_handle_error(type, mci, 1,
			     res->sys_addr >> PAGE_SHIFT,
			     res->sys_addr & ~PAGE_MASK,
			     ECC_ERROR_LOG_SYND(ecclog),
			     res->channel_idx, res->sub_channel_idx,
			     -1, "", "");
}

static struct gen_pool *ecclog_gen_pool_create(void)
{
	struct gen_pool *pool;

	pool = gen_pool_create(ilog2(sizeof(struct ecclog_node)), -1);
	if (!pool)
		return NULL;

	if (gen_pool_add(pool, (unsigned long)ecclog_buf, ECCLOG_POOL_SIZE, -1)) {
		gen_pool_destroy(pool);
		return NULL;
	}

	return pool;
}

static int ecclog_gen_pool_add(int mc, u64 ecclog)
{
	struct ecclog_node *node;

	node = (void *)gen_pool_alloc(ecclog_pool, sizeof(*node));
	if (!node)
		return -ENOMEM;

	node->mc = mc;
	node->ecclog = ecclog;
	llist_add(&node->llnode, &ecclog_llist);

	return 0;
}

/*
 * Either the memory-mapped I/O status register ECC_ERROR_LOG or the PCI
 * configuration space status register ERRSTS can indicate whether a
 * correctable error or an uncorrectable error occurred. We only use the
 * ECC_ERROR_LOG register to check the error type, but need to clear both
 * registers to enable future error events.
 */
static u64 ecclog_read_and_clear(struct igen6_imc *imc)
{
	u64 ecclog = readq(imc->window + ECC_ERROR_LOG_OFFSET);

	if (ecclog & (ECC_ERROR_LOG_CE | ECC_ERROR_LOG_UE)) {
		/* Clear CE/UE bits by writing 1s */
		writeq(ecclog, imc->window + ECC_ERROR_LOG_OFFSET);
		return ecclog;
	}

	return 0;
}

static void errsts_clear(struct igen6_imc *imc)
{
	u16 errsts;

	if (pci_read_config_word(imc->pdev, ERRSTS_OFFSET, &errsts)) {
		igen6_printk(KERN_ERR, "Failed to read ERRSTS\n");
		return;
	}

	/* Clear CE/UE bits by writing 1s */
	if (errsts & (ERRSTS_CE | ERRSTS_UE))
		pci_write_config_word(imc->pdev, ERRSTS_OFFSET, errsts);
}

static int errcmd_enable_error_reporting(bool enable)
{
	struct igen6_imc *imc = &igen6_pvt->imc[0];
	u16 errcmd;
	int rc;

	rc = pci_read_config_word(imc->pdev, ERRCMD_OFFSET, &errcmd);
	if (rc)
		return pcibios_err_to_errno(rc);

	if (enable)
		errcmd |= ERRCMD_CE | ERRCMD_UE;
	else
		errcmd &= ~(ERRCMD_CE | ERRCMD_UE);

	rc = pci_write_config_word(imc->pdev, ERRCMD_OFFSET, errcmd);
	if (rc)
		return pcibios_err_to_errno(rc);

	return 0;
}
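
/*
 * Error-log processing pipeline: the NMI/MCE handler calls ecclog_handler()
 * to read and clear ECC_ERROR_LOG, stash the log on the lock-less list and
 * queue ecclog_irq_work. ecclog_irq_work_cb() then clears ERRSTS (which
 * isn't NMI-safe) and schedules ecclog_work, whose callback decodes the
 * logged errors and reports them to the EDAC core.
 */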

static int ecclog_handler(void)
{
	struct igen6_imc *imc;
	int i, n = 0;
	u64 ecclog;

	for (i = 0; i < res_cfg->num_imc; i++) {
		imc = &igen6_pvt->imc[i];

		/* errsts_clear() isn't NMI-safe. Delay it in the IRQ context */

		ecclog = ecclog_read_and_clear(imc);
		if (!ecclog)
			continue;

		if (!ecclog_gen_pool_add(i, ecclog))
			irq_work_queue(&ecclog_irq_work);

		n++;
	}

	return n;
}

static void ecclog_work_cb(struct work_struct *work)
{
	struct ecclog_node *node, *tmp;
	struct mem_ctl_info *mci;
	struct llist_node *head;
	struct decoded_addr res;
	u64 eaddr;

	head = llist_del_all(&ecclog_llist);
	if (!head)
		return;

	llist_for_each_entry_safe(node, tmp, head, llnode) {
		memset(&res, 0, sizeof(res));
		if (res_cfg->err_addr)
			eaddr = res_cfg->err_addr(node->ecclog);
		else
			eaddr = ECC_ERROR_LOG_ADDR(node->ecclog) <<
				ECC_ERROR_LOG_ADDR_SHIFT;
		res.mc = node->mc;
		res.sys_addr = res_cfg->err_addr_to_sys_addr(eaddr, res.mc);
		res.imc_addr = res_cfg->err_addr_to_imc_addr(eaddr, res.mc);

		mci = igen6_pvt->imc[res.mc].mci;

		edac_dbg(2, "MC %d, ecclog = 0x%llx\n", node->mc, node->ecclog);
		igen6_mc_printk(mci, KERN_DEBUG, "HANDLING IBECC MEMORY ERROR\n");
		igen6_mc_printk(mci, KERN_DEBUG, "ADDR 0x%llx ", res.sys_addr);

		if (!igen6_decode(&res))
			igen6_output_error(&res, mci, node->ecclog);

		gen_pool_free(ecclog_pool, (unsigned long)node, sizeof(*node));
	}
}

static void ecclog_irq_work_cb(struct irq_work *irq_work)
{
	int i;

	for (i = 0; i < res_cfg->num_imc; i++)
		errsts_clear(&igen6_pvt->imc[i]);

	if (!llist_empty(&ecclog_llist))
		schedule_work(&ecclog_work);
}

static int ecclog_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	unsigned char reason;

	if (!ecclog_handler())
		return NMI_DONE;

	/*
	 * Both In-Band ECC correctable errors and uncorrectable errors are
	 * reported by SERR# NMI. The NMI generic code (see pci_serr_error())
	 * doesn't clear the bit NMI_REASON_CLEAR_SERR (in port 0x61) to
	 * re-enable the SERR# NMI after NMI handling. So clear this bit here
	 * to re-enable SERR# NMI for receiving future In-Band ECC errors.
	 */
	reason = x86_platform.get_nmi_reason() & NMI_REASON_CLEAR_MASK;
	reason |= NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);
	reason &= ~NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);

	return NMI_HANDLED;
}

static int ecclog_mce_handler(struct notifier_block *nb, unsigned long val,
			      void *data)
{
	struct mce *mce = (struct mce *)data;
	char *type;

	if (mce->kflags & MCE_HANDLED_CEC)
		return NOTIFY_DONE;

	/*
	 * Ignore unless this is a memory related error.
	 * We don't check the bit MCI_STATUS_ADDRV of MCi_STATUS here,
	 * since this bit isn't set on some CPUs (e.g., Tiger Lake UP3).
	 */
	if ((mce->status & 0xefff) >> 7 != 1)
		return NOTIFY_DONE;

	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	edac_dbg(0, "CPU %d: Machine Check %s: 0x%llx Bank %d: 0x%llx\n",
		 mce->extcpu, type, mce->mcgstatus,
		 mce->bank, mce->status);
	edac_dbg(0, "TSC 0x%llx\n", mce->tsc);
	edac_dbg(0, "ADDR 0x%llx\n", mce->addr);
	edac_dbg(0, "MISC 0x%llx\n", mce->misc);
	edac_dbg(0, "PROCESSOR %u:0x%x TIME %llu SOCKET %u APIC 0x%x\n",
		 mce->cpuvendor, mce->cpuid, mce->time,
		 mce->socketid, mce->apicid);
	/*
	 * We just use the Machine Check for the memory error notification.
	 * Each memory controller is associated with an IBECC instance.
	 * Directly read and clear the error information (error address and
	 * error type) on all the IBECC instances so that we know on which
	 * memory controller the memory error(s) occurred.
	 */
	if (!ecclog_handler())
		return NOTIFY_DONE;

	mce->kflags |= MCE_HANDLED_EDAC;

	return NOTIFY_DONE;
}

static struct notifier_block ecclog_mce_dec = {
	.notifier_call = ecclog_mce_handler,
	.priority = MCE_PRIO_EDAC,
};

static bool igen6_check_ecc(struct igen6_imc *imc)
{
	u32 activate = readl(imc->window + IBECC_ACTIVATE_OFFSET);

	return !!(activate & IBECC_ACTIVATE_EN);
}

static int igen6_get_dimm_config(struct mem_ctl_info *mci)
{
	struct igen6_imc *imc = mci->pvt_info;
	u32 mad_inter, mad_intra, mad_dimm;
	int i, j, ndimms, mc = imc->mc;
	struct dimm_info *dimm;
	enum mem_type mtype;
	enum dev_type dtype;
	u64 dsize;
	bool ecc;

	edac_dbg(2, "\n");

	mad_inter = readl(imc->window + MAD_INTER_CHANNEL_OFFSET);
	mtype = get_memory_type(mad_inter);
	ecc = igen6_check_ecc(imc);
	imc->ch_s_size = MAD_INTER_CHANNEL_CH_S_SIZE(mad_inter);
	imc->ch_l_map = MAD_INTER_CHANNEL_CH_L_MAP(mad_inter);

	for (i = 0; i < NUM_CHANNELS; i++) {
		mad_intra = readl(imc->window + MAD_INTRA_CH0_OFFSET + i * 4);
		mad_dimm = readl(imc->window + MAD_DIMM_CH0_OFFSET + i * 4);

		imc->dimm_l_size[i] = MAD_DIMM_CH_DIMM_L_SIZE(mad_dimm);
		imc->dimm_s_size[i] = MAD_DIMM_CH_DIMM_S_SIZE(mad_dimm);
		imc->dimm_l_map[i] = MAD_INTRA_CH_DIMM_L_MAP(mad_intra);
		imc->size += imc->dimm_s_size[i];
		imc->size += imc->dimm_l_size[i];
		ndimms = 0;

		for (j = 0; j < NUM_DIMMS; j++) {
			dimm = edac_get_dimm(mci, i, j, 0);

			if (j ^ imc->dimm_l_map[i]) {
				dtype = get_width(0, mad_dimm);
				dsize = imc->dimm_s_size[i];
			} else {
				dtype = get_width(1, mad_dimm);
				dsize = imc->dimm_l_size[i];
			}

			if (!dsize)
				continue;

			dimm->grain = 64;
			dimm->mtype = mtype;
			dimm->dtype = dtype;
			dimm->nr_pages = MiB_TO_PAGES(dsize >> 20);
			dimm->edac_mode = EDAC_SECDED;
			snprintf(dimm->label, sizeof(dimm->label),
				 "MC#%d_Chan#%d_DIMM#%d", mc, i, j);
			edac_dbg(0, "MC %d, Channel %d, DIMM %d, Size %llu MiB (%u pages)\n",
				 mc, i, j, dsize >> 20, dimm->nr_pages);

			ndimms++;
		}

		if (ndimms && !ecc) {
			igen6_printk(KERN_ERR, "MC%d In-Band ECC is disabled\n", mc);
			return -ENODEV;
		}
	}

	edac_dbg(0, "MC %d, total size %llu MiB\n", mc, imc->size >> 20);

	return 0;
}
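
/*
 * With CONFIG_EDAC_DEBUG, dump the decode registers at probe time and
 * expose a write-only debugfs file (typically <debugfs>/edac/igen6_test/addr).
 * Writing a valid system address to it synthesizes a correctable-error
 * log entry for MC0, exercising the decode and reporting paths without
 * real ECC errors.
 */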

#ifdef CONFIG_EDAC_DEBUG
/* Top of upper usable DRAM */
static u64 igen6_touud;
#define TOUUD_OFFSET	0xa8

static void igen6_reg_dump(struct igen6_imc *imc)
{
	int i;

	edac_dbg(2, "CHANNEL_HASH     : 0x%x\n",
		 readl(imc->window + CHANNEL_HASH_OFFSET));
	edac_dbg(2, "CHANNEL_EHASH    : 0x%x\n",
		 readl(imc->window + CHANNEL_EHASH_OFFSET));
	edac_dbg(2, "MAD_INTER_CHANNEL: 0x%x\n",
		 readl(imc->window + MAD_INTER_CHANNEL_OFFSET));
	edac_dbg(2, "ECC_ERROR_LOG    : 0x%llx\n",
		 readq(imc->window + ECC_ERROR_LOG_OFFSET));

	for (i = 0; i < NUM_CHANNELS; i++) {
		edac_dbg(2, "MAD_INTRA_CH%d    : 0x%x\n", i,
			 readl(imc->window + MAD_INTRA_CH0_OFFSET + i * 4));
		edac_dbg(2, "MAD_DIMM_CH%d     : 0x%x\n", i,
			 readl(imc->window + MAD_DIMM_CH0_OFFSET + i * 4));
	}
	edac_dbg(2, "TOLUD            : 0x%x", igen6_tolud);
	edac_dbg(2, "TOUUD            : 0x%llx", igen6_touud);
	edac_dbg(2, "TOM              : 0x%llx", igen6_tom);
}

static struct dentry *igen6_test;

static int debugfs_u64_set(void *data, u64 val)
{
	u64 ecclog;

	if ((val >= igen6_tolud && val < _4GB) || val >= igen6_touud) {
		edac_dbg(0, "Address 0x%llx out of range\n", val);
		return 0;
	}

	pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);

	val >>= ECC_ERROR_LOG_ADDR_SHIFT;
	ecclog = (val << ECC_ERROR_LOG_ADDR_SHIFT) | ECC_ERROR_LOG_CE;

	if (!ecclog_gen_pool_add(0, ecclog))
		irq_work_queue(&ecclog_irq_work);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

static void igen6_debug_setup(void)
{
	igen6_test = edac_debugfs_create_dir("igen6_test");
	if (!igen6_test)
		return;

	if (!edac_debugfs_create_file("addr", 0200, igen6_test,
				      NULL, &fops_u64_wo)) {
		debugfs_remove(igen6_test);
		igen6_test = NULL;
	}
}

static void igen6_debug_teardown(void)
{
	debugfs_remove_recursive(igen6_test);
}
#else
static void igen6_reg_dump(struct igen6_imc *imc) {}
static void igen6_debug_setup(void) {}
static void igen6_debug_teardown(void) {}
#endif

static int igen6_pci_setup(struct pci_dev *pdev, u64 *mchbar)
{
	union {
		u64 v;
		struct {
			u32 v_lo;
			u32 v_hi;
		};
	} u;

	edac_dbg(2, "\n");

	if (!res_cfg->ibecc_available(pdev)) {
		edac_dbg(2, "No In-Band ECC IP\n");
		goto fail;
	}

	if (pci_read_config_dword(pdev, TOLUD_OFFSET, &igen6_tolud)) {
		igen6_printk(KERN_ERR, "Failed to read TOLUD\n");
		goto fail;
	}

	igen6_tolud &= GENMASK(31, 20);

	if (pci_read_config_dword(pdev, TOM_OFFSET, &u.v_lo)) {
		igen6_printk(KERN_ERR, "Failed to read lower TOM\n");
		goto fail;
	}

	if (pci_read_config_dword(pdev, TOM_OFFSET + 4, &u.v_hi)) {
		igen6_printk(KERN_ERR, "Failed to read upper TOM\n");
		goto fail;
	}

	igen6_tom = u.v & GENMASK_ULL(38, 20);

	if (get_mchbar(pdev, mchbar))
		goto fail;

#ifdef CONFIG_EDAC_DEBUG
	if (pci_read_config_dword(pdev, TOUUD_OFFSET, &u.v_lo))
		edac_dbg(2, "Failed to read lower TOUUD\n");
	else if (pci_read_config_dword(pdev, TOUUD_OFFSET + 4, &u.v_hi))
		edac_dbg(2, "Failed to read upper TOUUD\n");
	else
		igen6_touud = u.v & GENMASK_ULL(38, 20);
#endif

	return 0;
fail:
	return -ENODEV;
}
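
/*
 * Each memory controller owns a separate MCHBAR_SIZE (64 KiB) register
 * window: MC i is mapped at mchbar + i * MCHBAR_SIZE.
 */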

static int igen6_register_mci(int mc, u64 mchbar, struct pci_dev *pdev)
{
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;
	struct igen6_imc *imc;
	void __iomem *window;
	int rc;

	edac_dbg(2, "\n");

	mchbar += mc * MCHBAR_SIZE;
	window = ioremap(mchbar, MCHBAR_SIZE);
	if (!window) {
		igen6_printk(KERN_ERR, "Failed to ioremap 0x%llx\n", mchbar);
		return -ENODEV;
	}

	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = NUM_CHANNELS;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = NUM_DIMMS;
	layers[1].is_virt_csrow = true;

	mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, 0);
	if (!mci) {
		rc = -ENOMEM;
		goto fail;
	}

	mci->ctl_name = kasprintf(GFP_KERNEL, "Intel_client_SoC MC#%d", mc);
	if (!mci->ctl_name) {
		rc = -ENOMEM;
		goto fail2;
	}

	mci->mtype_cap = MEM_FLAG_LPDDR4 | MEM_FLAG_DDR4;
	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = EDAC_MOD_STR;
	mci->dev_name = pci_name(pdev);
	mci->pvt_info = &igen6_pvt->imc[mc];

	imc = mci->pvt_info;
	device_initialize(&imc->dev);
	/*
	 * EDAC core uses mci->pdev (a pointer to struct device) as the
	 * memory controller ID. The client SoCs attach one or more memory
	 * controllers to a single pci_dev (a single pci_dev->dev can serve
	 * multiple memory controllers).
	 *
	 * To make mci->pdev unique, assign pci_dev->dev to mci->pdev for
	 * the first memory controller and assign a unique imc->dev to
	 * mci->pdev for each subsequent memory controller.
	 */
	mci->pdev = mc ? &imc->dev : &pdev->dev;
	imc->mc = mc;
	imc->pdev = pdev;
	imc->window = window;

	igen6_reg_dump(imc);

	rc = igen6_get_dimm_config(mci);
	if (rc)
		goto fail3;

	rc = edac_mc_add_mc(mci);
	if (rc) {
		igen6_printk(KERN_ERR, "Failed to register mci#%d\n", mc);
		goto fail3;
	}

	imc->mci = mci;
	return 0;
fail3:
	kfree(mci->ctl_name);
fail2:
	edac_mc_free(mci);
fail:
	iounmap(window);
	return rc;
}

static void igen6_unregister_mcis(void)
{
	struct mem_ctl_info *mci;
	struct igen6_imc *imc;
	int i;

	edac_dbg(2, "\n");

	for (i = 0; i < res_cfg->num_imc; i++) {
		imc = &igen6_pvt->imc[i];
		mci = imc->mci;
		if (!mci)
			continue;

		edac_mc_del_mc(mci->pdev);
		kfree(mci->ctl_name);
		edac_mc_free(mci);
		iounmap(imc->window);
	}
}

static int igen6_mem_slice_setup(u64 mchbar)
{
	struct igen6_imc *imc = &igen6_pvt->imc[0];
	u64 base = mchbar + res_cfg->cmf_base;
	u32 offset = res_cfg->ms_hash_offset;
	u32 size = res_cfg->cmf_size;
	u64 ms_s_size, ms_hash;
	void __iomem *cmf;
	int ms_l_map;

	edac_dbg(2, "\n");

	if (imc[0].size < imc[1].size) {
		ms_s_size = imc[0].size;
		ms_l_map = 1;
	} else {
		ms_s_size = imc[1].size;
		ms_l_map = 0;
	}

	igen6_pvt->ms_s_size = ms_s_size;
	igen6_pvt->ms_l_map = ms_l_map;

	edac_dbg(0, "ms_s_size: %llu MiB, ms_l_map %d\n",
		 ms_s_size >> 20, ms_l_map);

	if (!size)
		return 0;

	cmf = ioremap(base, size);
	if (!cmf) {
		igen6_printk(KERN_ERR, "Failed to ioremap cmf 0x%llx\n", base);
		return -ENODEV;
	}

	ms_hash = readq(cmf + offset);
	igen6_pvt->ms_hash = ms_hash;

	edac_dbg(0, "MEM_SLICE_HASH: 0x%llx\n", ms_hash);

	iounmap(cmf);

	return 0;
}

static int register_err_handler(void)
{
	int rc;

	if (res_cfg->machine_check) {
		mce_register_decode_chain(&ecclog_mce_dec);
		return 0;
	}

	rc = register_nmi_handler(NMI_SERR, ecclog_nmi_handler,
				  0, IGEN6_NMI_NAME);
	if (rc) {
		igen6_printk(KERN_ERR, "Failed to register NMI handler\n");
		return rc;
	}

	return 0;
}

static void unregister_err_handler(void)
{
	if (res_cfg->machine_check) {
		mce_unregister_decode_chain(&ecclog_mce_dec);
		return;
	}

	unregister_nmi_handler(NMI_SERR, IGEN6_NMI_NAME);
}
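
/*
 * Probe sequence: map resources, register one EDAC MC per iMC, set up
 * memory-slice decode (two-iMC parts only), create the NMI-safe log pool,
 * hook the NMI or MCE handler, and finally enable error reporting via
 * ERRCMD. Errors logged before/during handler registration are drained
 * by the trailing ecclog_handler() call.
 */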

static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	u64 mchbar;
	int i, rc;

	edac_dbg(2, "\n");

	igen6_pvt = kzalloc(sizeof(*igen6_pvt), GFP_KERNEL);
	if (!igen6_pvt)
		return -ENOMEM;

	res_cfg = (struct res_config *)ent->driver_data;

	rc = igen6_pci_setup(pdev, &mchbar);
	if (rc)
		goto fail;

	for (i = 0; i < res_cfg->num_imc; i++) {
		rc = igen6_register_mci(i, mchbar, pdev);
		if (rc)
			goto fail2;
	}

	if (res_cfg->num_imc > 1) {
		rc = igen6_mem_slice_setup(mchbar);
		if (rc)
			goto fail2;
	}

	ecclog_pool = ecclog_gen_pool_create();
	if (!ecclog_pool) {
		rc = -ENOMEM;
		goto fail2;
	}

	INIT_WORK(&ecclog_work, ecclog_work_cb);
	init_irq_work(&ecclog_irq_work, ecclog_irq_work_cb);

	rc = register_err_handler();
	if (rc)
		goto fail3;

	/* Enable error reporting */
	rc = errcmd_enable_error_reporting(true);
	if (rc) {
		igen6_printk(KERN_ERR, "Failed to enable error reporting\n");
		goto fail4;
	}

	/* Check for any pending errors before/during the registration of the error handler */
	ecclog_handler();

	igen6_debug_setup();
	return 0;
fail4:
	/* Undo whichever handler (NMI or MCE) register_err_handler() set up */
	unregister_err_handler();
fail3:
	gen_pool_destroy(ecclog_pool);
fail2:
	igen6_unregister_mcis();
fail:
	kfree(igen6_pvt);
	return rc;
}

static void igen6_remove(struct pci_dev *pdev)
{
	edac_dbg(2, "\n");

	igen6_debug_teardown();
	errcmd_enable_error_reporting(false);
	unregister_err_handler();
	irq_work_sync(&ecclog_irq_work);
	flush_work(&ecclog_work);
	gen_pool_destroy(ecclog_pool);
	igen6_unregister_mcis();
	kfree(igen6_pvt);
}

static struct pci_driver igen6_driver = {
	.name = EDAC_MOD_STR,
	.probe = igen6_probe,
	.remove = igen6_remove,
	.id_table = igen6_pci_tbl,
};

static int __init igen6_init(void)
{
	const char *owner;
	int rc;

	edac_dbg(2, "\n");

	if (ghes_get_devices())
		return -EBUSY;

	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	edac_op_state = EDAC_OPSTATE_NMI;

	rc = pci_register_driver(&igen6_driver);
	if (rc)
		return rc;

	igen6_printk(KERN_INFO, "%s\n", IGEN6_REVISION);

	return 0;
}

static void __exit igen6_exit(void)
{
	edac_dbg(2, "\n");

	pci_unregister_driver(&igen6_driver);
}

module_init(igen6_init);
module_exit(igen6_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Qiuxu Zhuo");
MODULE_DESCRIPTION("MC Driver for Intel client SoC using In-Band ECC");