// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Intel(R) 10nm server memory controller.
 * Copyright (c) 2019, Intel Corporation.
 *
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/mce.h>
#include "edac_module.h"
#include "skx_common.h"

#define I10NM_REVISION	"v0.0.5"
#define EDAC_MOD_STR	"i10nm_edac"

/* Debug macros */
#define i10nm_printk(level, fmt, arg...)	\
	edac_printk(level, "i10nm", fmt, ##arg)

#define I10NM_GET_SCK_BAR(d, reg)	\
	pci_read_config_dword((d)->uracu, 0xd0, &(reg))
#define I10NM_GET_IMC_BAR(d, i, reg)	\
	pci_read_config_dword((d)->uracu, 0xd8 + (i) * 4, &(reg))
#define I10NM_GET_SAD(d, offset, i, reg)	\
	pci_read_config_dword((d)->sad_all, (offset) + (i) * 8, &(reg))
#define I10NM_GET_HBM_IMC_BAR(d, reg)	\
	pci_read_config_dword((d)->uracu, 0xd4, &(reg))
#define I10NM_GET_CAPID3_CFG(d, reg)	\
	pci_read_config_dword((d)->pcu_cr3, 0x90, &(reg))
#define I10NM_GET_DIMMMTR(m, i, j)	\
	readl((m)->mbase + ((m)->hbm_mc ? 0x80c : 0x2080c) + \
	(i) * (m)->chan_mmio_sz + (j) * 4)
#define I10NM_GET_MCDDRTCFG(m, i)	\
	readl((m)->mbase + ((m)->hbm_mc ? 0x970 : 0x20970) + \
	(i) * (m)->chan_mmio_sz)
#define I10NM_GET_MCMTR(m, i)		\
	readl((m)->mbase + ((m)->hbm_mc ? 0xef8 : 0x20ef8) + \
	(i) * (m)->chan_mmio_sz)
#define I10NM_GET_AMAP(m, i)		\
	readl((m)->mbase + ((m)->hbm_mc ? 0x814 : 0x20814) + \
	(i) * (m)->chan_mmio_sz)
#define I10NM_GET_REG32(m, i, offset)	\
	readl((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
#define I10NM_GET_REG64(m, i, offset)	\
	readq((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
#define I10NM_SET_REG32(m, i, offset, v)	\
	writel(v, (m)->mbase + (i) * (m)->chan_mmio_sz + (offset))

#define I10NM_GET_SCK_MMIO_BASE(reg)	(GET_BITFIELD(reg, 0, 28) << 23)
#define I10NM_GET_IMC_MMIO_OFFSET(reg)	(GET_BITFIELD(reg, 0, 10) << 12)
#define I10NM_GET_IMC_MMIO_SIZE(reg)	((GET_BITFIELD(reg, 13, 23) - \
					 GET_BITFIELD(reg, 0, 10) + 1) << 12)
#define I10NM_GET_HBM_IMC_MMIO_OFFSET(reg)	\
	((GET_BITFIELD(reg, 0, 10) << 12) + 0x140000)

#define I10NM_HBM_IMC_MMIO_SIZE		0x9000
#define I10NM_IS_HBM_PRESENT(reg)	GET_BITFIELD(reg, 27, 30)
#define I10NM_IS_HBM_IMC(reg)		GET_BITFIELD(reg, 29, 29)

#define I10NM_MAX_SAD			16
#define I10NM_SAD_ENABLE(reg)		GET_BITFIELD(reg, 0, 0)
#define I10NM_SAD_NM_CACHEABLE(reg)	GET_BITFIELD(reg, 5, 5)

#define RETRY_RD_ERR_LOG_UC		BIT(1)
#define RETRY_RD_ERR_LOG_NOOVER		BIT(14)
#define RETRY_RD_ERR_LOG_EN		BIT(15)
#define RETRY_RD_ERR_LOG_NOOVER_UC	(BIT(14) | BIT(1))
#define RETRY_RD_ERR_LOG_OVER_UC_V	(BIT(2) | BIT(1) | BIT(0))

static struct list_head *i10nm_edac_list;

static struct res_config *res_cfg;
static int retry_rd_err_log;
static int decoding_via_mca;
static bool mem_cfg_2lm;

static u32 offsets_scrub_icx[]  = {0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8};
static u32 offsets_scrub_spr[]  = {0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8};
static u32 offsets_scrub_spr_hbm0[]  = {0x2860, 0x2854, 0x2b08, 0x2858, 0x2828, 0x0ed8};
static u32 offsets_scrub_spr_hbm1[]  = {0x2c60, 0x2c54, 0x2f08, 0x2c58, 0x2c28, 0x0fa8};
static u32 offsets_demand_icx[] = {0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0};
static u32 offsets_demand_spr[] = {0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0};
static u32 offsets_demand2_spr[] = {0x22c70,
				    0x22d80, 0x22f18, 0x22d58, 0x22c64, 0x20f10};
static u32 offsets_demand_spr_hbm0[] = {0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0};
static u32 offsets_demand_spr_hbm1[] = {0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0};

static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable,
				      u32 *offsets_scrub, u32 *offsets_demand,
				      u32 *offsets_demand2)
{
	u32 s, d, d2;

	s = I10NM_GET_REG32(imc, chan, offsets_scrub[0]);
	d = I10NM_GET_REG32(imc, chan, offsets_demand[0]);
	if (offsets_demand2)
		d2 = I10NM_GET_REG32(imc, chan, offsets_demand2[0]);

	if (enable) {
		/* Save default configurations */
		imc->chan[chan].retry_rd_err_log_s = s;
		imc->chan[chan].retry_rd_err_log_d = d;
		if (offsets_demand2)
			imc->chan[chan].retry_rd_err_log_d2 = d2;

		s &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
		s |= RETRY_RD_ERR_LOG_EN;
		d &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
		d |= RETRY_RD_ERR_LOG_EN;

		if (offsets_demand2) {
			d2 &= ~RETRY_RD_ERR_LOG_UC;
			d2 |= RETRY_RD_ERR_LOG_NOOVER;
			d2 |= RETRY_RD_ERR_LOG_EN;
		}
	} else {
		/* Restore default configurations */
		if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_UC)
			s |= RETRY_RD_ERR_LOG_UC;
		if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_NOOVER)
			s |= RETRY_RD_ERR_LOG_NOOVER;
		if (!(imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_EN))
			s &= ~RETRY_RD_ERR_LOG_EN;
		if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_UC)
			d |= RETRY_RD_ERR_LOG_UC;
		if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_NOOVER)
			d |= RETRY_RD_ERR_LOG_NOOVER;
		if (!(imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_EN))
			d &= ~RETRY_RD_ERR_LOG_EN;

		if (offsets_demand2) {
			if (imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_UC)
				d2 |= RETRY_RD_ERR_LOG_UC;
			if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_NOOVER))
				d2 &= ~RETRY_RD_ERR_LOG_NOOVER;
			if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_EN))
				d2 &= ~RETRY_RD_ERR_LOG_EN;
		}
	}

	I10NM_SET_REG32(imc, chan, offsets_scrub[0], s);
	I10NM_SET_REG32(imc, chan, offsets_demand[0], d);
	if (offsets_demand2)
		I10NM_SET_REG32(imc, chan, offsets_demand2[0], d2);
}

static void enable_retry_rd_err_log(bool enable)
{
	struct skx_imc *imc;
	struct skx_dev *d;
	int i, j;

	edac_dbg(2, "\n");

	list_for_each_entry(d, i10nm_edac_list, list)
		for (i = 0; i < I10NM_NUM_IMC; i++) {
			imc = &d->imc[i];
			if (!imc->mbase)
				continue;

			for (j = 0; j < I10NM_NUM_CHANNELS; j++) {
				if (imc->hbm_mc) {
					__enable_retry_rd_err_log(imc, j, enable,
								  res_cfg->offsets_scrub_hbm0,
								  res_cfg->offsets_demand_hbm0,
								  NULL);
					__enable_retry_rd_err_log(imc, j, enable,
								  res_cfg->offsets_scrub_hbm1,
								  res_cfg->offsets_demand_hbm1,
								  NULL);
				} else {
					__enable_retry_rd_err_log(imc, j, enable,
								  res_cfg->offsets_scrub,
								  res_cfg->offsets_demand,
								  res_cfg->offsets_demand2);
				}
			}
		}
}

static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
				  int len, bool scrub_err)
{
	struct skx_imc *imc = &res->dev->imc[res->imc];
	u32 log0, log1, log2, log3, log4;
	u32 corr0, corr1, corr2, corr3;
	u32 lxg0, lxg1, lxg3, lxg4;
	u32 *xffsets = NULL;
	u64 log2a, log5;
	u64 lxg2a, lxg5;
	u32 *offsets;
	int n, pch;

	if (!imc->mbase)
		return;

	if (imc->hbm_mc) {
		pch = res->cs & 1;
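		/*
		 * Descriptive note (added comment): for an HBM controller the
		 * low bit of the chip select is the pseudo channel, which
		 * selects between the hbm1 and hbm0 retry_rd_err_log offset
		 * tables below.
		 */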

		if (pch)
			offsets = scrub_err ? res_cfg->offsets_scrub_hbm1 :
					      res_cfg->offsets_demand_hbm1;
		else
			offsets = scrub_err ? res_cfg->offsets_scrub_hbm0 :
					      res_cfg->offsets_demand_hbm0;
	} else {
		if (scrub_err) {
			offsets = res_cfg->offsets_scrub;
		} else {
			offsets = res_cfg->offsets_demand;
			xffsets = res_cfg->offsets_demand2;
		}
	}

	log0 = I10NM_GET_REG32(imc, res->channel, offsets[0]);
	log1 = I10NM_GET_REG32(imc, res->channel, offsets[1]);
	log3 = I10NM_GET_REG32(imc, res->channel, offsets[3]);
	log4 = I10NM_GET_REG32(imc, res->channel, offsets[4]);
	log5 = I10NM_GET_REG64(imc, res->channel, offsets[5]);

	if (xffsets) {
		lxg0 = I10NM_GET_REG32(imc, res->channel, xffsets[0]);
		lxg1 = I10NM_GET_REG32(imc, res->channel, xffsets[1]);
		lxg3 = I10NM_GET_REG32(imc, res->channel, xffsets[3]);
		lxg4 = I10NM_GET_REG32(imc, res->channel, xffsets[4]);
		lxg5 = I10NM_GET_REG64(imc, res->channel, xffsets[5]);
	}

	if (res_cfg->type == SPR) {
		log2a = I10NM_GET_REG64(imc, res->channel, offsets[2]);
		n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.16llx %.8x %.8x %.16llx",
			     log0, log1, log2a, log3, log4, log5);

		if (len - n > 0) {
			if (xffsets) {
				lxg2a = I10NM_GET_REG64(imc, res->channel, xffsets[2]);
				n += snprintf(msg + n, len - n, " %.8x %.8x %.16llx %.8x %.8x %.16llx]",
					      lxg0, lxg1, lxg2a, lxg3, lxg4, lxg5);
			} else {
				n += snprintf(msg + n, len - n, "]");
			}
		}
	} else {
		log2 = I10NM_GET_REG32(imc, res->channel, offsets[2]);
		n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x %.16llx]",
			     log0, log1, log2, log3, log4, log5);
	}

	if (imc->hbm_mc) {
		if (pch) {
			corr0 = I10NM_GET_REG32(imc, res->channel, 0x2c18);
			corr1 = I10NM_GET_REG32(imc, res->channel, 0x2c1c);
			corr2 = I10NM_GET_REG32(imc, res->channel, 0x2c20);
			corr3 = I10NM_GET_REG32(imc, res->channel, 0x2c24);
		} else {
			corr0 = I10NM_GET_REG32(imc, res->channel, 0x2818);
			corr1 = I10NM_GET_REG32(imc, res->channel, 0x281c);
			corr2 = I10NM_GET_REG32(imc, res->channel, 0x2820);
			corr3 = I10NM_GET_REG32(imc, res->channel, 0x2824);
		}
	} else {
		corr0 = I10NM_GET_REG32(imc, res->channel, 0x22c18);
		corr1 = I10NM_GET_REG32(imc, res->channel, 0x22c1c);
		corr2 = I10NM_GET_REG32(imc, res->channel, 0x22c20);
		corr3 = I10NM_GET_REG32(imc, res->channel, 0x22c24);
	}

	if (len - n > 0)
		snprintf(msg + n, len - n,
			 " correrrcnt[%.4x %.4x %.4x %.4x %.4x %.4x %.4x %.4x]",
			 corr0 & 0xffff, corr0 >> 16,
			 corr1 & 0xffff, corr1 >> 16,
			 corr2 & 0xffff, corr2 >> 16,
			 corr3 & 0xffff, corr3 >> 16);

	/* Clear status bits */
	if (retry_rd_err_log == 2) {
		if (log0 & RETRY_RD_ERR_LOG_OVER_UC_V) {
			log0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
			I10NM_SET_REG32(imc, res->channel, offsets[0], log0);
		}

		if (xffsets && (lxg0 & RETRY_RD_ERR_LOG_OVER_UC_V)) {
			lxg0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
			I10NM_SET_REG32(imc, res->channel, xffsets[0], lxg0);
		}
	}
}

static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus,
					   unsigned int dev, unsigned int fun)
{
	struct pci_dev *pdev;

	pdev = pci_get_domain_bus_and_slot(dom, bus, PCI_DEVFN(dev, fun));
	if (!pdev) {
		edac_dbg(2, "No device %02x:%02x.%x\n",
			 bus, dev, fun);
		return NULL;
	}

	if (unlikely(pci_enable_device(pdev) < 0)) {
		edac_dbg(2, "Failed to enable device %02x:%02x.%x\n",
			 bus, dev, fun);
		pci_dev_put(pdev);
		return NULL;
	}

	return pdev;
}

static bool i10nm_check_2lm(struct res_config *cfg)
{
	struct skx_dev *d;
	u32 reg;
	int i;

	list_for_each_entry(d, i10nm_edac_list, list) {
		d->sad_all = pci_get_dev_wrapper(d->seg, d->bus[1],
						 PCI_SLOT(cfg->sad_all_devfn),
						 PCI_FUNC(cfg->sad_all_devfn));
		if (!d->sad_all)
			continue;

		for (i = 0; i < I10NM_MAX_SAD; i++) {
			I10NM_GET_SAD(d, cfg->sad_all_offset, i, reg);
			if (I10NM_SAD_ENABLE(reg) && I10NM_SAD_NM_CACHEABLE(reg)) {
				edac_dbg(2, "2-level memory configuration.\n");
				return true;
			}
		}
	}

	return false;
}

/*
 * Check whether the error comes from DDRT by ICX/Tremont model specific error code.
 * Refer to SDM vol3B 16.11.3 Intel IMC MC error codes for IA32_MCi_STATUS.
 */
static bool i10nm_mscod_is_ddrt(u32 mscod)
{
	switch (mscod) {
	case 0x0106: case 0x0107:
	case 0x0800: case 0x0804:
	case 0x0806 ... 0x0808:
	case 0x080a ... 0x080e:
	case 0x0810: case 0x0811:
	case 0x0816: case 0x081e:
	case 0x081f:
		return true;
	}

	return false;
}

static bool i10nm_mc_decode_available(struct mce *mce)
{
	u8 bank;

	if (!decoding_via_mca || mem_cfg_2lm)
		return false;

	if ((mce->status & (MCI_STATUS_MISCV | MCI_STATUS_ADDRV))
			!= (MCI_STATUS_MISCV | MCI_STATUS_ADDRV))
		return false;

	bank = mce->bank;

	switch (res_cfg->type) {
	case I10NM:
		if (bank < 13 || bank > 26)
			return false;

		/* DDRT errors can't be decoded from MCA bank registers */
		if (MCI_MISC_ECC_MODE(mce->misc) == MCI_MISC_ECC_DDRT)
			return false;

		if (i10nm_mscod_is_ddrt(MCI_STATUS_MSCOD(mce->status)))
			return false;

		/* Check whether one of {13,14,17,18,21,22,25,26} */
		return ((bank - 13) & BIT(1)) == 0;
	default:
		return false;
	}
}

static bool i10nm_mc_decode(struct decoded_addr *res)
{
	struct mce *m = res->mce;
	struct skx_dev *d;
	u8 bank;

	if (!i10nm_mc_decode_available(m))
		return false;

	list_for_each_entry(d, i10nm_edac_list, list) {
		if (d->imc[0].src_id == m->socketid) {
			res->socket = m->socketid;
			res->dev = d;
			break;
		}
	}

	switch (res_cfg->type) {
	case I10NM:
		bank = m->bank - 13;
		res->imc = bank / 4;
		res->channel = bank % 2;
		break;
	default:
		return false;
	}

	if (!res->dev) {
		skx_printk(KERN_ERR, "No device for src_id %d imc %d\n",
			   m->socketid, res->imc);
		return false;
	}

	res->column       = GET_BITFIELD(m->misc, 9, 18) << 2;
	res->row          = GET_BITFIELD(m->misc, 19, 39);
	res->bank_group   = GET_BITFIELD(m->misc, 40, 41);
	res->bank_address = GET_BITFIELD(m->misc, 42, 43);
	res->bank_group  |= GET_BITFIELD(m->misc, 44, 44) << 2;
	res->rank         = GET_BITFIELD(m->misc, 56, 58);
	res->dimm         = res->rank >> 2;
	res->rank         = res->rank % 4;

	return true;
}

static int i10nm_get_ddr_munits(void)
{
	struct pci_dev *mdev;
	void __iomem *mbase;
	unsigned long size;
	struct skx_dev *d;
	int i, j = 0;
	u32 reg, off;
	u64 base;

	list_for_each_entry(d, i10nm_edac_list, list) {
		d->util_all = pci_get_dev_wrapper(d->seg, d->bus[1], 29, 1);
		if (!d->util_all)
			return -ENODEV;

		d->uracu = pci_get_dev_wrapper(d->seg, d->bus[0], 0, 1);
		if (!d->uracu)
			return -ENODEV;

		if (I10NM_GET_SCK_BAR(d, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get socket bar\n");
			return -ENODEV;
		}

		base = I10NM_GET_SCK_MMIO_BASE(reg);
		edac_dbg(2, "socket%d mmio base 0x%llx (reg 0x%x)\n",
			 j++, base, reg);

		for (i = 0; i < I10NM_NUM_DDR_IMC; i++) {
			mdev = pci_get_dev_wrapper(d->seg, d->bus[0],
						   12 + i, 0);
			if (i == 0 && !mdev) {
				i10nm_printk(KERN_ERR, "No IMC found\n");
				return -ENODEV;
			}
			if (!mdev)
				continue;

			d->imc[i].mdev = mdev;

			if (I10NM_GET_IMC_BAR(d, i, reg)) {
				i10nm_printk(KERN_ERR, "Failed to get mc bar\n");
				return -ENODEV;
			}

			off  = I10NM_GET_IMC_MMIO_OFFSET(reg);
			size = I10NM_GET_IMC_MMIO_SIZE(reg);
			edac_dbg(2, "mc%d mmio base 0x%llx size 0x%lx (reg 0x%x)\n",
				 i, base + off, size, reg);

			mbase = ioremap(base + off, size);
			if (!mbase) {
				i10nm_printk(KERN_ERR, "Failed to ioremap 0x%llx\n",
					     base + off);
				return -ENODEV;
			}

			d->imc[i].mbase = mbase;
		}
	}

	return 0;
}

static bool i10nm_check_hbm_imc(struct skx_dev *d)
{
	u32 reg;

	if (I10NM_GET_CAPID3_CFG(d, reg)) {
		i10nm_printk(KERN_ERR, "Failed to get capid3_cfg\n");
		return false;
	}

	return I10NM_IS_HBM_PRESENT(reg) != 0;
}

static int i10nm_get_hbm_munits(void)
{
	struct pci_dev *mdev;
	void __iomem *mbase;
	u32 reg, off, mcmtr;
	struct skx_dev *d;
	int i, lmc;
	u64 base;

	list_for_each_entry(d, i10nm_edac_list, list) {
		d->pcu_cr3 = pci_get_dev_wrapper(d->seg, d->bus[1], 30, 3);
		if (!d->pcu_cr3)
			return -ENODEV;

		if (!i10nm_check_hbm_imc(d)) {
			i10nm_printk(KERN_DEBUG, "No hbm memory\n");
			return -ENODEV;
		}

		if (I10NM_GET_SCK_BAR(d, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get socket bar\n");
			return -ENODEV;
		}
		base = I10NM_GET_SCK_MMIO_BASE(reg);

		if (I10NM_GET_HBM_IMC_BAR(d, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get hbm mc bar\n");
			return -ENODEV;
		}
		base += I10NM_GET_HBM_IMC_MMIO_OFFSET(reg);

		lmc = I10NM_NUM_DDR_IMC;

		for (i = 0; i < I10NM_NUM_HBM_IMC; i++) {
			mdev = pci_get_dev_wrapper(d->seg, d->bus[0],
						   12 + i / 4, 1 + i % 4);
			if (i == 0 && !mdev) {
				i10nm_printk(KERN_ERR, "No hbm mc found\n");
				return -ENODEV;
			}
			if (!mdev)
				continue;

			d->imc[lmc].mdev = mdev;
			off = i * I10NM_HBM_IMC_MMIO_SIZE;

			edac_dbg(2, "hbm mc%d mmio base 0x%llx size 0x%x\n",
				 lmc, base + off, I10NM_HBM_IMC_MMIO_SIZE);

			mbase = ioremap(base + off, I10NM_HBM_IMC_MMIO_SIZE);
			if (!mbase) {
				pci_dev_put(d->imc[lmc].mdev);
				d->imc[lmc].mdev = NULL;

				i10nm_printk(KERN_ERR, "Failed to ioremap for hbm mc 0x%llx\n",
					     base + off);
				return -ENOMEM;
			}

			d->imc[lmc].mbase = mbase;
			d->imc[lmc].hbm_mc = true;

			mcmtr = I10NM_GET_MCMTR(&d->imc[lmc], 0);
			if (!I10NM_IS_HBM_IMC(mcmtr)) {
				iounmap(d->imc[lmc].mbase);
				d->imc[lmc].mbase = NULL;
				d->imc[lmc].hbm_mc = false;
				pci_dev_put(d->imc[lmc].mdev);
				d->imc[lmc].mdev = NULL;

				i10nm_printk(KERN_ERR, "This isn't an hbm mc!\n");
				return -ENODEV;
			}

			lmc++;
		}
	}

	return 0;
}

static struct res_config i10nm_cfg0 = {
	.type			= I10NM,
	.decs_did		= 0x3452,
	.busno_cfg_offset	= 0xcc,
	.ddr_chan_mmio_sz	= 0x4000,
	.sad_all_devfn		= PCI_DEVFN(29, 0),
	.sad_all_offset		= 0x108,
	.offsets_scrub		= offsets_scrub_icx,
	.offsets_demand		= offsets_demand_icx,
};

static struct res_config i10nm_cfg1 = {
	.type			= I10NM,
	.decs_did		= 0x3452,
	.busno_cfg_offset	= 0xd0,
	.ddr_chan_mmio_sz	= 0x4000,
	.sad_all_devfn		= PCI_DEVFN(29, 0),
	.sad_all_offset		= 0x108,
	.offsets_scrub		= offsets_scrub_icx,
	.offsets_demand		= offsets_demand_icx,
};

static struct res_config spr_cfg = {
	.type			= SPR,
	.decs_did		= 0x3252,
	.busno_cfg_offset	= 0xd0,
	.ddr_chan_mmio_sz	= 0x8000,
	.hbm_chan_mmio_sz	= 0x4000,
	.support_ddr5		= true,
	.sad_all_devfn		= PCI_DEVFN(10, 0),
	.sad_all_offset		= 0x300,
	.offsets_scrub		= offsets_scrub_spr,
	.offsets_scrub_hbm0	= offsets_scrub_spr_hbm0,
	.offsets_scrub_hbm1	= offsets_scrub_spr_hbm1,
	.offsets_demand		= offsets_demand_spr,
	.offsets_demand2	= offsets_demand2_spr,
	.offsets_demand_hbm0	= offsets_demand_spr_hbm0,
	.offsets_demand_hbm1	= offsets_demand_spr_hbm1,
};

static const struct x86_cpu_id i10nm_cpuids[] = {
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D,	X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D,	X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X,		X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X,		X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_D,		X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SAPPHIRERAPIDS_X,	X86_STEPPINGS(0x0, 0xf), &spr_cfg),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);

static bool i10nm_check_ecc(struct skx_imc *imc, int chan)
{
	u32 mcmtr;

	mcmtr = I10NM_GET_MCMTR(imc, chan);
	edac_dbg(1, "ch%d mcmtr reg %x\n", chan, mcmtr);

	return !!GET_BITFIELD(mcmtr, 2, 2);
}

static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
				 struct res_config *cfg)
{
	struct skx_pvt *pvt = mci->pvt_info;
	struct skx_imc *imc = pvt->imc;
	u32 mtr, amap, mcddrtcfg;
	struct dimm_info *dimm;
	int i, j, ndimms;

	for (i = 0; i < imc->num_channels; i++) {
		if (!imc->mbase)
			continue;

		ndimms = 0;
		amap = I10NM_GET_AMAP(imc, i);
		mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i);
		for (j = 0; j < imc->num_dimms; j++) {
			dimm = edac_get_dimm(mci, i, j, 0);
			mtr = I10NM_GET_DIMMMTR(imc, i, j);
			edac_dbg(1, "dimmmtr 0x%x mcddrtcfg 0x%x (mc%d ch%d dimm%d)\n",
				 mtr, mcddrtcfg, imc->mc, i, j);

			if (IS_DIMM_PRESENT(mtr))
				ndimms += skx_get_dimm_info(mtr, 0, amap, dimm,
							    imc, i, j, cfg);
			else if (IS_NVDIMM_PRESENT(mcddrtcfg, j))
				ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
							      EDAC_MOD_STR);
		}
		if (ndimms && !i10nm_check_ecc(imc, i)) {
			i10nm_printk(KERN_ERR, "ECC is disabled on imc %d channel %d\n",
				     imc->mc, i);
			return -ENODEV;
		}
	}

	return 0;
}

static struct notifier_block i10nm_mce_dec = {
	.notifier_call	= skx_mce_check_error,
	.priority	= MCE_PRIO_EDAC,
};

#ifdef CONFIG_EDAC_DEBUG
/*
 * Debug feature.
 * Exercise the address decode logic by writing an address to
 * /sys/kernel/debug/edac/i10nm_test/addr.
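 *
 * Usage sketch (illustrative; assumes debugfs is mounted at /sys/kernel/debug
 * and CONFIG_EDAC_DEBUG is enabled; the address value is arbitrary):
 *
 *   echo 0x12345678 > /sys/kernel/debug/edac/i10nm_test/addr
 *
 * This injects a fake corrected memory-read error at that address through
 * skx_mce_check_error() so the decode path can be exercised.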
 */
static struct dentry *i10nm_test;

static int debugfs_u64_set(void *data, u64 val)
{
	struct mce m;

	pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);

	memset(&m, 0, sizeof(m));
	/* ADDRV + MemRd + Unknown channel */
	m.status = MCI_STATUS_ADDRV + 0x90;
	/* One corrected error */
	m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);
	m.addr = val;
	skx_mce_check_error(NULL, 0, &m);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

static void setup_i10nm_debug(void)
{
	i10nm_test = edac_debugfs_create_dir("i10nm_test");
	if (!i10nm_test)
		return;

	if (!edac_debugfs_create_file("addr", 0200, i10nm_test,
				      NULL, &fops_u64_wo)) {
		debugfs_remove(i10nm_test);
		i10nm_test = NULL;
	}
}

static void teardown_i10nm_debug(void)
{
	debugfs_remove_recursive(i10nm_test);
}
#else
static inline void setup_i10nm_debug(void) {}
static inline void teardown_i10nm_debug(void) {}
#endif /*CONFIG_EDAC_DEBUG*/

static int __init i10nm_init(void)
{
	u8 mc = 0, src_id = 0, node_id = 0;
	const struct x86_cpu_id *id;
	struct res_config *cfg;
	const char *owner;
	struct skx_dev *d;
	int rc, i, off[3] = {0xd0, 0xc8, 0xcc};
	u64 tolm, tohm;

	edac_dbg(2, "\n");

	if (ghes_get_devices())
		return -EBUSY;

	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(i10nm_cpuids);
	if (!id)
		return -ENODEV;

	cfg = (struct res_config *)id->driver_data;
	res_cfg = cfg;

	rc = skx_get_hi_lo(0x09a2, off, &tolm, &tohm);
	if (rc)
		return rc;

	rc = skx_get_all_bus_mappings(cfg, &i10nm_edac_list);
	if (rc < 0)
		goto fail;
	if (rc == 0) {
		i10nm_printk(KERN_ERR, "No memory controllers found\n");
		return -ENODEV;
	}

	mem_cfg_2lm = i10nm_check_2lm(cfg);
	skx_set_mem_cfg(mem_cfg_2lm);

	rc = i10nm_get_ddr_munits();

	if (i10nm_get_hbm_munits() && rc)
		goto fail;

	list_for_each_entry(d, i10nm_edac_list, list) {
		rc = skx_get_src_id(d, 0xf8, &src_id);
		if (rc < 0)
			goto fail;

		rc = skx_get_node_id(d, &node_id);
		if (rc < 0)
			goto fail;

		edac_dbg(2, "src_id = %d node_id = %d\n", src_id, node_id);
		for (i = 0; i < I10NM_NUM_IMC; i++) {
			if (!d->imc[i].mdev)
				continue;

			d->imc[i].mc = mc++;
			d->imc[i].lmc = i;
			d->imc[i].src_id = src_id;
			d->imc[i].node_id = node_id;
			if (d->imc[i].hbm_mc) {
				d->imc[i].chan_mmio_sz = cfg->hbm_chan_mmio_sz;
				d->imc[i].num_channels = I10NM_NUM_HBM_CHANNELS;
				d->imc[i].num_dimms    = I10NM_NUM_HBM_DIMMS;
			} else {
				d->imc[i].chan_mmio_sz = cfg->ddr_chan_mmio_sz;
				d->imc[i].num_channels = I10NM_NUM_DDR_CHANNELS;
				d->imc[i].num_dimms    = I10NM_NUM_DDR_DIMMS;
			}

			rc = skx_register_mci(&d->imc[i], d->imc[i].mdev,
					      "Intel_10nm Socket", EDAC_MOD_STR,
					      i10nm_get_dimm_config, cfg);
			if (rc < 0)
				goto fail;
		}
	}

	rc = skx_adxl_get();
	if (rc)
		goto fail;

	opstate_init();
	mce_register_decode_chain(&i10nm_mce_dec);
	setup_i10nm_debug();

	if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
		skx_set_decode(i10nm_mc_decode, show_retry_rd_err_log);
		if (retry_rd_err_log == 2)
			enable_retry_rd_err_log(true);
	} else {
		skx_set_decode(i10nm_mc_decode, NULL);
	}

	i10nm_printk(KERN_INFO, "%s\n", I10NM_REVISION);

	return 0;
fail:
	skx_remove();
	return rc;
}

static void __exit i10nm_exit(void)
{
	edac_dbg(2, "\n");

	if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
		skx_set_decode(NULL, NULL);
		if (retry_rd_err_log == 2)
			enable_retry_rd_err_log(false);
	}

	teardown_i10nm_debug();
	mce_unregister_decode_chain(&i10nm_mce_dec);
	skx_adxl_put();
	skx_remove();
}

module_init(i10nm_init);
module_exit(i10nm_exit);

static int set_decoding_via_mca(const char *buf, const struct kernel_param *kp)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);

	if (ret || val > 1)
		return -EINVAL;

	if (val && mem_cfg_2lm) {
		i10nm_printk(KERN_NOTICE, "Decoding errors via MCA banks for 2LM isn't supported yet\n");
		return -EIO;
	}

	ret = param_set_int(buf, kp);

	return ret;
}

static const struct kernel_param_ops decoding_via_mca_param_ops = {
	.set = set_decoding_via_mca,
	.get = param_get_int,
};

module_param_cb(decoding_via_mca, &decoding_via_mca_param_ops, &decoding_via_mca, 0644);
MODULE_PARM_DESC(decoding_via_mca, "decoding_via_mca: 0=off(default), 1=enable");

module_param(retry_rd_err_log, int, 0444);
MODULE_PARM_DESC(retry_rd_err_log, "retry_rd_err_log: 0=off(default), 1=bios(Linux doesn't reset any control bits, but just reports values.), 2=linux(Linux tries to take control: it resets the mode bits and clears the valid/UC bits after reading.)");

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MC Driver for Intel 10nm server processors");
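
/*
 * Usage sketch (illustrative note, not part of the driver): the module
 * parameters above are normally given at load time, e.g.
 *
 *   modprobe i10nm_edac retry_rd_err_log=2 decoding_via_mca=1
 *
 * retry_rd_err_log is read-only at runtime (mode 0444), while decoding_via_mca
 * (mode 0644) can also be toggled later via
 * /sys/module/i10nm_edac/parameters/decoding_via_mca, except on 2-level
 * memory configurations where the set callback rejects it.
 */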