/* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module
 *
 * This driver supports the memory controllers found on the Intel
 * processor family Sandy Bridge.
 *
 * This file may be distributed under the terms of the
 * GNU General Public License version 2 only.
 *
 * Copyright (c) 2011 by:
 *	 Mauro Carvalho Chehab
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>

#include "edac_module.h"

/* Static vars */
static LIST_HEAD(sbridge_edac_list);

/*
 * Alter this version for the module when modifications are made
 */
#define SBRIDGE_REVISION	" Ver: 1.1.2 "
#define EDAC_MOD_STR		"sb_edac"

/*
 * Debug macros
 */
#define sbridge_printk(level, fmt, arg...)			\
	edac_printk(level, "sbridge", fmt, ##arg)

#define sbridge_mc_printk(mci, level, fmt, arg...)		\
	edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg)

/*
 * Get a bit field at register value <v>, from bit <lo> to bit <hi>
 */
#define GET_BITFIELD(v, lo, hi)	\
	(((v) & GENMASK_ULL(hi, lo)) >> (lo))

/* Devices 12 Function 6, Offsets 0x80 to 0xcc */
static const u32 sbridge_dram_rule[] = {
	0x80, 0x88, 0x90, 0x98, 0xa0,
	0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
};

static const u32 ibridge_dram_rule[] = {
	0x60, 0x68, 0x70, 0x78, 0x80,
	0x88, 0x90, 0x98, 0xa0, 0xa8,
	0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
	0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
};

static const u32 knl_dram_rule[] = {
	0x60, 0x68, 0x70, 0x78, 0x80, /* 0-4 */
	0x88, 0x90, 0x98, 0xa0, 0xa8, /* 5-9 */
	0xb0, 0xb8, 0xc0, 0xc8, 0xd0, /* 10-14 */
	0xd8, 0xe0, 0xe8, 0xf0, 0xf8, /* 15-19 */
	0x100, 0x108, 0x110, 0x118,   /* 20-23 */
};

#define DRAM_RULE_ENABLE(reg)	GET_BITFIELD(reg, 0, 0)
#define A7MODE(reg)		GET_BITFIELD(reg, 26, 26)

static char *show_dram_attr(u32 attr)
{
	switch (attr) {
	case 0:
		return "DRAM";
	case 1:
		return "MMCFG";
	case 2:
		return "NXM";
	default:
		return "unknown";
	}
}

static const u32 sbridge_interleave_list[] = {
	0x84, 0x8c, 0x94, 0x9c, 0xa4,
	0xac, 0xb4, 0xbc, 0xc4, 0xcc,
};

static const u32 ibridge_interleave_list[] = {
	0x64, 0x6c, 0x74, 0x7c, 0x84,
	0x8c, 0x94, 0x9c, 0xa4, 0xac,
	0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
	0xdc, 0xe4, 0xec, 0xf4, 0xfc,
};

static const u32 knl_interleave_list[] = {
	0x64, 0x6c, 0x74, 0x7c, 0x84, /* 0-4 */
	0x8c, 0x94, 0x9c, 0xa4, 0xac, /* 5-9 */
	0xb4, 0xbc, 0xc4, 0xcc, 0xd4, /* 10-14 */
	0xdc, 0xe4, 0xec, 0xf4, 0xfc, /* 15-19 */
	0x104, 0x10c, 0x114, 0x11c,   /* 20-23 */
};
#define MAX_INTERLEAVE	\
	(max_t(unsigned int, ARRAY_SIZE(sbridge_interleave_list),	\
	       max_t(unsigned int, ARRAY_SIZE(ibridge_interleave_list),	\
		     ARRAY_SIZE(knl_interleave_list))))
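/*
 * Each interleave_pkg entry names the bit range (start..end) inside a SAD
 * interleave list register that holds one package target; sad_pkg() below
 * extracts entry <interleave> from such a register. For example, with the
 * Sandy Bridge table, target #2 lives in bits 10:8 of the register.
 */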
struct interleave_pkg {
	unsigned char start;
	unsigned char end;
};

static const struct interleave_pkg sbridge_interleave_pkg[] = {
	{ 0, 2 },
	{ 3, 5 },
	{ 8, 10 },
	{ 11, 13 },
	{ 16, 18 },
	{ 19, 21 },
	{ 24, 26 },
	{ 27, 29 },
};

static const struct interleave_pkg ibridge_interleave_pkg[] = {
	{ 0, 3 },
	{ 4, 7 },
	{ 8, 11 },
	{ 12, 15 },
	{ 16, 19 },
	{ 20, 23 },
	{ 24, 27 },
	{ 28, 31 },
};

static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
			  int interleave)
{
	return GET_BITFIELD(reg, table[interleave].start,
			    table[interleave].end);
}

/* Devices 12 Function 7 */

#define TOLM		0x80
#define TOHM		0x84
#define HASWELL_TOLM	0xd0
#define HASWELL_TOHM_0	0xd4
#define HASWELL_TOHM_1	0xd8
#define KNL_TOLM	0xd0
#define KNL_TOHM_0	0xd4
#define KNL_TOHM_1	0xd8

#define GET_TOLM(reg)		((GET_BITFIELD(reg, 0, 3) << 28) | 0x3ffffff)
#define GET_TOHM(reg)		((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)

/* Device 13 Function 6 */

#define SAD_TARGET	0xf0

#define SOURCE_ID(reg)		GET_BITFIELD(reg, 9, 11)

#define SOURCE_ID_KNL(reg)	GET_BITFIELD(reg, 12, 14)

#define SAD_CONTROL	0xf4

/* Device 14 function 0 */

static const u32 tad_dram_rule[] = {
	0x40, 0x44, 0x48, 0x4c,
	0x50, 0x54, 0x58, 0x5c,
	0x60, 0x64, 0x68, 0x6c,
};
#define MAX_TAD	ARRAY_SIZE(tad_dram_rule)

#define TAD_LIMIT(reg)		((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff)
#define TAD_SOCK(reg)		GET_BITFIELD(reg, 10, 11)
#define TAD_CH(reg)		GET_BITFIELD(reg, 8, 9)
#define TAD_TGT3(reg)		GET_BITFIELD(reg, 6, 7)
#define TAD_TGT2(reg)		GET_BITFIELD(reg, 4, 5)
#define TAD_TGT1(reg)		GET_BITFIELD(reg, 2, 3)
#define TAD_TGT0(reg)		GET_BITFIELD(reg, 0, 1)

/* Device 15, function 0 */

#define MCMTR			0x7c
#define KNL_MCMTR		0x624

#define IS_ECC_ENABLED(mcmtr)		GET_BITFIELD(mcmtr, 2, 2)
#define IS_LOCKSTEP_ENABLED(mcmtr)	GET_BITFIELD(mcmtr, 1, 1)
#define IS_CLOSE_PG(mcmtr)		GET_BITFIELD(mcmtr, 0, 0)

/* Device 15, function 1 */

#define RASENABLES		0xac
#define IS_MIRROR_ENABLED(reg)	GET_BITFIELD(reg, 0, 0)

/* Device 15, functions 2-5 */

static const int mtr_regs[] = {
	0x80, 0x84, 0x88,
};

static const int knl_mtr_reg = 0xb60;

#define RANK_DISABLE(mtr)	GET_BITFIELD(mtr, 16, 19)
#define IS_DIMM_PRESENT(mtr)	GET_BITFIELD(mtr, 14, 14)
#define RANK_CNT_BITS(mtr)	GET_BITFIELD(mtr, 12, 13)
#define RANK_WIDTH_BITS(mtr)	GET_BITFIELD(mtr, 2, 4)
#define COL_WIDTH_BITS(mtr)	GET_BITFIELD(mtr, 0, 1)

static const u32 tad_ch_nilv_offset[] = {
	0x90, 0x94, 0x98, 0x9c,
	0xa0, 0xa4, 0xa8, 0xac,
	0xb0, 0xb4, 0xb8, 0xbc,
};
#define CHN_IDX_OFFSET(reg)	GET_BITFIELD(reg, 28, 29)
#define TAD_OFFSET(reg)		(GET_BITFIELD(reg, 6, 25) << 26)

static const u32 rir_way_limit[] = {
	0x108, 0x10c, 0x110, 0x114, 0x118,
};
#define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit)

#define IS_RIR_VALID(reg)	GET_BITFIELD(reg, 31, 31)
#define RIR_WAY(reg)		GET_BITFIELD(reg, 28, 29)

#define MAX_RIR_WAY	8

static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
	{ 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c },
	{ 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c },
	{ 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c },
	{ 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c },
	{ 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
};
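/*
 * Rank Interleave Range registers: each of the MAX_RIR_RANGES ranges has a
 * way/limit register (rir_way_limit[]) plus MAX_RIR_WAY per-way registers
 * (rir_offset[][]) holding the rank target and rank offset decoded below.
 */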
#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
	GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))

#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
	GET_BITFIELD(reg, 2, 15) : GET_BITFIELD(reg, 2, 14))

/* Device 16, functions 2-7 */

/*
 * FIXME: Implement the error count reads directly
 */

static const u32 correrrcnt[] = {
	0x104, 0x108, 0x10c, 0x110,
};

#define RANK_ODD_OV(reg)	GET_BITFIELD(reg, 31, 31)
#define RANK_ODD_ERR_CNT(reg)	GET_BITFIELD(reg, 16, 30)
#define RANK_EVEN_OV(reg)	GET_BITFIELD(reg, 15, 15)
#define RANK_EVEN_ERR_CNT(reg)	GET_BITFIELD(reg, 0, 14)

static const u32 correrrthrsld[] = {
	0x11c, 0x120, 0x124, 0x128,
};

#define RANK_ODD_ERR_THRSLD(reg)	GET_BITFIELD(reg, 16, 30)
#define RANK_EVEN_ERR_THRSLD(reg)	GET_BITFIELD(reg, 0, 14)


/* Device 17, function 0 */

#define SB_RANK_CFG_A		0x0328

#define IB_RANK_CFG_A		0x0320

/*
 * sbridge structs
 */

#define NUM_CHANNELS		6	/* Max channels per MC */
#define MAX_DIMMS		3	/* Max DIMMS per channel */
#define KNL_MAX_CHAS		38	/* KNL max num. of Cache Home Agents */
#define KNL_MAX_CHANNELS	6	/* KNL max num. of PCI channels */
#define KNL_MAX_EDCS		8	/* Embedded DRAM controllers */
#define CHANNEL_UNSPECIFIED	0xf	/* Intel IA32 SDM 15-14 */

enum type {
	SANDY_BRIDGE,
	IVY_BRIDGE,
	HASWELL,
	BROADWELL,
	KNIGHTS_LANDING,
};

enum domain {
	IMC0 = 0,
	IMC1,
	SOCK,
};

enum mirroring_mode {
	NON_MIRRORING,
	ADDR_RANGE_MIRRORING,
	FULL_MIRRORING,
};

struct sbridge_pvt;
struct sbridge_info {
	enum type	type;
	u32		mcmtr;
	u32		rankcfgr;
	u64		(*get_tolm)(struct sbridge_pvt *pvt);
	u64		(*get_tohm)(struct sbridge_pvt *pvt);
	u64		(*rir_limit)(u32 reg);
	u64		(*sad_limit)(u32 reg);
	u32		(*interleave_mode)(u32 reg);
	u32		(*dram_attr)(u32 reg);
	const u32	*dram_rule;
	const u32	*interleave_list;
	const struct interleave_pkg *interleave_pkg;
	u8		max_sad;
	u8		(*get_node_id)(struct sbridge_pvt *pvt);
	enum mem_type	(*get_memory_type)(struct sbridge_pvt *pvt);
	enum dev_type	(*get_width)(struct sbridge_pvt *pvt, u32 mtr);
	struct pci_dev	*pci_vtd;
};

struct sbridge_channel {
	u32		ranks;
	u32		dimms;
};

struct pci_id_descr {
	int		dev_id;
	int		optional;
	enum domain	dom;
};

struct pci_id_table {
	const struct pci_id_descr	*descr;
	int				n_devs_per_imc;
	int				n_devs_per_sock;
	int				n_imcs_per_sock;
	enum type			type;
};

struct sbridge_dev {
	struct list_head	list;
	u8			bus, mc;
	u8			node_id, source_id;
	struct pci_dev		**pdev;
	enum domain		dom;
	int			n_devs;
	int			i_devs;
	struct mem_ctl_info	*mci;
};

struct knl_pvt {
	struct pci_dev	*pci_cha[KNL_MAX_CHAS];
	struct pci_dev	*pci_channel[KNL_MAX_CHANNELS];
	struct pci_dev	*pci_mc0;
	struct pci_dev	*pci_mc1;
	struct pci_dev	*pci_mc0_misc;
	struct pci_dev	*pci_mc1_misc;
	struct pci_dev	*pci_mc_info; /* tolm, tohm */
};
struct sbridge_pvt {
	/* Devices per socket */
	struct pci_dev		*pci_ddrio;
	struct pci_dev		*pci_sad0, *pci_sad1;
	struct pci_dev		*pci_br0, *pci_br1;
	/* Devices per memory controller */
	struct pci_dev		*pci_ha, *pci_ta, *pci_ras;
	struct pci_dev		*pci_tad[NUM_CHANNELS];

	struct sbridge_dev	*sbridge_dev;

	struct sbridge_info	info;
	struct sbridge_channel	channel[NUM_CHANNELS];

	/* Memory type detection */
	bool			is_cur_addr_mirrored, is_lockstep, is_close_pg;
	bool			is_chan_hash;
	enum mirroring_mode	mirror_mode;

	/* Memory description */
	u64			tolm, tohm;
	struct knl_pvt		knl;
};

#define PCI_DESCR(device_id, opt, domain)	\
	.dev_id = (device_id),			\
	.optional = opt,			\
	.dom = domain

static const struct pci_id_descr pci_dev_descr_sbridge[] = {
		/* Processor Home Agent */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0, IMC0) },

		/* Memory controller */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1, SOCK) },

		/* System Address Decoder */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0, SOCK) },

		/* Broadcast Registers */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0, SOCK) },
};

#define PCI_ID_TABLE_ENTRY(A, N, M, T) {	\
	.descr = A,				\
	.n_devs_per_imc = N,			\
	.n_devs_per_sock = ARRAY_SIZE(A),	\
	.n_imcs_per_sock = M,			\
	.type = T				\
}

static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, ARRAY_SIZE(pci_dev_descr_sbridge), 1, SANDY_BRIDGE),
	{0,}			/* 0 terminated list. */
};

/* This changes depending on whether it's 1HA or 2HA:
 * 1HA:
 *	0x0eb8 (17.0) is DDRIO0
 * 2HA:
 *	0x0ebc (17.4) is DDRIO0
 */
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0	0x0eb8
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0	0x0ebc

/* pci ids */
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0		0x0ea0
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA		0x0ea8
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS		0x0e71
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0	0x0eaa
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1	0x0eab
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2	0x0eac
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3	0x0ead
#define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD			0x0ec8
#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0			0x0ec9
#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1			0x0eca
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1		0x0e60
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA		0x0e68
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS		0x0e79
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0	0x0e6a
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1	0x0e6b
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2	0x0e6c
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3	0x0e6d
static const struct pci_id_descr pci_dev_descr_ibridge[] = {
		/* Processor Home Agent */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) },

		/* Memory controller */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0, IMC0) },

		/* Optional, mode 2HA */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3, 1, IMC1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1, SOCK) },

		/* System Address Decoder */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0, SOCK) },

		/* Broadcast Registers */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0, SOCK) },

};

static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, 12, 2, IVY_BRIDGE),
	{0,}			/* 0 terminated list. */
};

/* Haswell support */
/* EN processor:
 *	- 1 IMC
 *	- 3 DDR3 channels, 2 DPC per channel
 * EP processor:
 *	- 1 or 2 IMC
 *	- 4 DDR4 channels, 3 DPC per channel
 * EP 4S processor:
 *	- 2 IMC
 *	- 4 DDR4 channels, 3 DPC per channel
 * EX processor:
 *	- 2 IMC
 *	- each IMC interfaces with a SMI 2 channel
 *	- each SMI channel interfaces with a scalable memory buffer
 *	- each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
 */
#define HASWELL_DDRCRCLKCONTROLS 0xa10 /* Ditto on Broadwell */
#define HASWELL_HASYSDEFEATURE2 0x84
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0	0x2fa0
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1	0x2f60
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA	0x2fa8
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM	0x2f71
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA	0x2f68
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM	0x2f79
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1 0x2fab
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2 0x2fac
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3 0x2fad
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 0x2f6a
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1 0x2f6b
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1 0x2fbf
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2 0x2fb9
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb
static const struct pci_id_descr pci_dev_descr_haswell[] = {
	/* first item must be the HA */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, 1, IMC1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1, IMC0) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1, IMC1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1, 1, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2, 1, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3, 1, SOCK) },
};

static const struct pci_id_table pci_dev_descr_haswell_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, 13, 2, HASWELL),
	{0,}			/* 0 terminated list. */
};

/* Knight's Landing Support */
/*
 * KNL's memory channels are swizzled between memory controllers.
 * MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2
 */
#define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)

/* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_MC		0x7840
/* DRAM channel stuff; bank addrs, dimmmtr, etc.. 2-8-2 - 2-9-4 (6 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN	0x7843
/* kdrwdbu TAD limits/offsets, MCMTR - 2-10-1, 2-11-1 (2 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_TA		0x7844
/* CHA broadcast registers, dram rules - 1-29-0 (1 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0	0x782a
/* SAD target - 1-29-1 (1 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1	0x782b
/* Caching / Home Agent */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHA		0x782c
/* Device with TOLM and TOHM, 0-5-0 (1 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM	0x7810

/*
 * KNL differs from SB, IB, and Haswell in that it has multiple
 * instances of the same device with the same device ID, so we handle that
 * by creating as many copies in the table as we expect to find.
 * (Like device IDs must be grouped together.)
 */
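/*
 * Entry count: 2 MC + 6 CHAN + 1 TA + 1 TOLHM + 2 SAD + 38 CHA = 50 devices,
 * matching the KNL_MAX_CHANNELS and KNL_MAX_CHAS limits above.
 */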
static const struct pci_id_descr pci_dev_descr_knl[] = {
	[0 ... 1]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC, 0, IMC0)},
	[2 ... 7]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN, 0, IMC0) },
	[8]         = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA, 0, IMC0) },
	[9]         = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0, IMC0) },
	[10]        = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0, 0, SOCK) },
	[11]        = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1, 0, SOCK) },
	[12 ... 49] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA, 0, SOCK) },
};

static const struct pci_id_table pci_dev_descr_knl_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, ARRAY_SIZE(pci_dev_descr_knl), 1, KNIGHTS_LANDING),
	{0,}
};

/*
 * Broadwell support
 *
 * DE processor:
 *	- 1 IMC
 *	- 2 DDR3 channels, 2 DPC per channel
 * EP processor:
 *	- 1 or 2 IMC
 *	- 4 DDR4 channels, 3 DPC per channel
 * EP 4S processor:
 *	- 2 IMC
 *	- 4 DDR4 channels, 3 DPC per channel
 * EX processor:
 *	- 2 IMC
 *	- each IMC interfaces with a SMI 2 channel
 *	- each SMI channel interfaces with a scalable memory buffer
 *	- each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
 */
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC 0x6f28
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0		0x6fa0
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1		0x6f60
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA	0x6fa8
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM	0x6f71
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA	0x6f68
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM	0x6f79
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0	0x6ffc
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1	0x6ffd
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0	0x6faa
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1	0x6fab
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2	0x6fac
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3	0x6fad
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0	0x6f6a
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1	0x6f6b
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2	0x6f6c
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3	0x6f6d
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0	0x6faf

static const struct pci_id_descr pci_dev_descr_broadwell[] = {
	/* first item must be the HA */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1, 1, IMC1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1, IMC0) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1, IMC1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0, 1, SOCK) },
};

static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, 10, 2, BROADWELL),
	{0,}			/* 0 terminated list. */
};


/****************************************************************************
			Ancillary status routines
 ****************************************************************************/
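/*
 * Decode helpers for the per-DIMM MTR registers: numrank(), numrow() and
 * numcol() turn the encoded fields into actual counts. For example, an MTR
 * with RANK_CNT_BITS == 2 decodes to 1 << 2 = 4 ranks.
 */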
static inline int numrank(enum type type, u32 mtr)
{
	int ranks = (1 << RANK_CNT_BITS(mtr));
	int max = 4;

	if (type == HASWELL || type == BROADWELL || type == KNIGHTS_LANDING)
		max = 8;

	if (ranks > max) {
		edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n",
			 ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr);
		return -EINVAL;
	}

	return ranks;
}

static inline int numrow(u32 mtr)
{
	int rows = (RANK_WIDTH_BITS(mtr) + 12);

	if (rows < 13 || rows > 18) {
		edac_dbg(0, "Invalid number of rows: %d (should be between 13 and 18) raw value = %x (%04x)\n",
			 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
		return -EINVAL;
	}

	return 1 << rows;
}

static inline int numcol(u32 mtr)
{
	int cols = (COL_WIDTH_BITS(mtr) + 10);

	if (cols > 12) {
		edac_dbg(0, "Invalid number of cols: %d (max = 12) raw value = %x (%04x)\n",
			 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
		return -EINVAL;
	}

	return 1 << cols;
}

static struct sbridge_dev *get_sbridge_dev(u8 bus, enum domain dom, int multi_bus,
					   struct sbridge_dev *prev)
{
	struct sbridge_dev *sbridge_dev;

	/*
	 * If we have devices scattered across several busses that pertain
	 * to the same memory controller, we'll lump them all together.
	 */
	if (multi_bus) {
		return list_first_entry_or_null(&sbridge_edac_list,
						struct sbridge_dev, list);
	}

	sbridge_dev = list_entry(prev ? prev->list.next
				      : sbridge_edac_list.next, struct sbridge_dev, list);

	list_for_each_entry_from(sbridge_dev, &sbridge_edac_list, list) {
		if (sbridge_dev->bus == bus && (dom == SOCK || dom == sbridge_dev->dom))
			return sbridge_dev;
	}

	return NULL;
}

static struct sbridge_dev *alloc_sbridge_dev(u8 bus, enum domain dom,
					     const struct pci_id_table *table)
{
	struct sbridge_dev *sbridge_dev;

	sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL);
	if (!sbridge_dev)
		return NULL;

	sbridge_dev->pdev = kcalloc(table->n_devs_per_imc,
				    sizeof(*sbridge_dev->pdev),
				    GFP_KERNEL);
	if (!sbridge_dev->pdev) {
		kfree(sbridge_dev);
		return NULL;
	}

	sbridge_dev->bus = bus;
	sbridge_dev->dom = dom;
	sbridge_dev->n_devs = table->n_devs_per_imc;
	list_add_tail(&sbridge_dev->list, &sbridge_edac_list);

	return sbridge_dev;
}

static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
{
	list_del(&sbridge_dev->list);
	kfree(sbridge_dev->pdev);
	kfree(sbridge_dev);
}

static u64 sbridge_get_tolm(struct sbridge_pvt *pvt)
{
	u32 reg;

	/* Address range is 32:28 */
	pci_read_config_dword(pvt->pci_sad1, TOLM, &reg);
	return GET_TOLM(reg);
}

static u64 sbridge_get_tohm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_sad1, TOHM, &reg);
	return GET_TOHM(reg);
}

static u64 ibridge_get_tolm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_br1, TOLM, &reg);

	return GET_TOLM(reg);
}
static u64 ibridge_get_tohm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_br1, TOHM, &reg);

	return GET_TOHM(reg);
}

static u64 rir_limit(u32 reg)
{
	return ((u64)GET_BITFIELD(reg, 1, 10) << 29) | 0x1fffffff;
}

static u64 sad_limit(u32 reg)
{
	return (GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff;
}

static u32 interleave_mode(u32 reg)
{
	return GET_BITFIELD(reg, 1, 1);
}

static u32 dram_attr(u32 reg)
{
	return GET_BITFIELD(reg, 2, 3);
}

static u64 knl_sad_limit(u32 reg)
{
	return (GET_BITFIELD(reg, 7, 26) << 26) | 0x3ffffff;
}

static u32 knl_interleave_mode(u32 reg)
{
	return GET_BITFIELD(reg, 1, 2);
}

static const char * const knl_intlv_mode[] = {
	"[8:6]", "[10:8]", "[14:12]", "[32:30]"
};

static const char *get_intlv_mode_str(u32 reg, enum type t)
{
	if (t == KNIGHTS_LANDING)
		return knl_intlv_mode[knl_interleave_mode(reg)];
	else
		return interleave_mode(reg) ? "[8:6]" : "[8:6]XOR[18:16]";
}

static u32 dram_attr_knl(u32 reg)
{
	return GET_BITFIELD(reg, 3, 4);
}


static enum mem_type get_memory_type(struct sbridge_pvt *pvt)
{
	u32 reg;
	enum mem_type mtype;

	if (pvt->pci_ddrio) {
		pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr,
				      &reg);
		if (GET_BITFIELD(reg, 11, 11))
			/* FIXME: Can also be LRDIMM */
			mtype = MEM_RDDR3;
		else
			mtype = MEM_DDR3;
	} else
		mtype = MEM_UNKNOWN;

	return mtype;
}

static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt)
{
	u32 reg;
	bool registered = false;
	enum mem_type mtype = MEM_UNKNOWN;

	if (!pvt->pci_ddrio)
		goto out;

	pci_read_config_dword(pvt->pci_ddrio,
			      HASWELL_DDRCRCLKCONTROLS, &reg);
	/* Is_Rdimm */
	if (GET_BITFIELD(reg, 16, 16))
		registered = true;

	pci_read_config_dword(pvt->pci_ta, MCMTR, &reg);
	if (GET_BITFIELD(reg, 14, 14)) {
		if (registered)
			mtype = MEM_RDDR4;
		else
			mtype = MEM_DDR4;
	} else {
		if (registered)
			mtype = MEM_RDDR3;
		else
			mtype = MEM_DDR3;
	}

out:
	return mtype;
}

static enum dev_type knl_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
	/* for KNL value is fixed */
	return DEV_X16;
}

static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
	/* there's no way to figure out */
	return DEV_UNKNOWN;
}

static enum dev_type __ibridge_get_width(u32 mtr)
{
	enum dev_type type;

	switch (mtr) {
	case 3:
		type = DEV_UNKNOWN;
		break;
	case 2:
		type = DEV_X16;
		break;
	case 1:
		type = DEV_X8;
		break;
	case 0:
		type = DEV_X4;
		break;
	}

	return type;
}

static enum dev_type ibridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
	/*
	 * ddr3_width on the documentation but also valid for DDR4 on
	 * Haswell
	 */
	return __ibridge_get_width(GET_BITFIELD(mtr, 7, 8));
}

static enum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
	/* ddr3_width on the documentation but also valid for DDR4 */
	return __ibridge_get_width(GET_BITFIELD(mtr, 8, 9));
}

static enum mem_type knl_get_memory_type(struct sbridge_pvt *pvt)
{
	/* DDR4 RDIMMS and LRDIMMS are supported */
	return MEM_RDDR4;
}

static u8 get_node_id(struct sbridge_pvt *pvt)
{
	u32 reg;
	pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, &reg);
	return GET_BITFIELD(reg, 0, 2);
}
static u8 haswell_get_node_id(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
	return GET_BITFIELD(reg, 0, 3);
}

static u8 knl_get_node_id(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
	return GET_BITFIELD(reg, 0, 2);
}


static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOLM, &reg);
	return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
}

static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
{
	u64 rc;
	u32 reg;

	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_0, &reg);
	rc = GET_BITFIELD(reg, 26, 31);
	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, &reg);
	rc = ((reg << 6) | rc) << 26;

	return rc | 0x1ffffff;
}

static u64 knl_get_tolm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOLM, &reg);
	return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
}

static u64 knl_get_tohm(struct sbridge_pvt *pvt)
{
	u64 rc;
	u32 reg_lo, reg_hi;

	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_0, &reg_lo);
	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_1, &reg_hi);
	rc = ((u64)reg_hi << 32) | reg_lo;
	return rc | 0x3ffffff;
}


static u64 haswell_rir_limit(u32 reg)
{
	return (((u64)GET_BITFIELD(reg, 1, 11) + 1) << 29) - 1;
}

static inline u8 sad_pkg_socket(u8 pkg)
{
	/* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */
	return ((pkg >> 3) << 2) | (pkg & 0x3);
}

static inline u8 sad_pkg_ha(u8 pkg)
{
	return (pkg >> 2) & 0x1;
}

static int haswell_chan_hash(int idx, u64 addr)
{
	int i;

	/*
	 * XOR even bits from 12:26 to bit0 of idx,
	 * odd bits from 13:27 to bit1
	 */
	for (i = 12; i < 28; i += 2)
		idx ^= (addr >> i) & 3;

	return idx;
}

/* Low bits of TAD limit, and some metadata. */
static const u32 knl_tad_dram_limit_lo[] = {
	0x400, 0x500, 0x600, 0x700,
	0x800, 0x900, 0xa00, 0xb00,
};

/* Low bits of TAD offset. */
static const u32 knl_tad_dram_offset_lo[] = {
	0x404, 0x504, 0x604, 0x704,
	0x804, 0x904, 0xa04, 0xb04,
};

/* High 16 bits of TAD limit and offset. */
static const u32 knl_tad_dram_hi[] = {
	0x408, 0x508, 0x608, 0x708,
	0x808, 0x908, 0xa08, 0xb08,
};

/* Number of ways a tad entry is interleaved. */
static const u32 knl_tad_ways[] = {
	8, 6, 4, 3, 2, 1,
};
/*
 * Retrieve the n'th Target Address Decode table entry
 * from the memory controller's TAD table.
 *
 * @pvt:	driver private data
 * @entry:	which entry you want to retrieve
 * @mc:		which memory controller (0 or 1)
 * @offset:	output tad range offset
 * @limit:	output address of first byte above tad range
 * @ways:	output number of interleave ways
 *
 * The offset value has curious semantics. It's a sort of running total
 * of the sizes of all the memory regions that aren't mapped in this
 * tad table.
 */
static int knl_get_tad(const struct sbridge_pvt *pvt,
		       const int entry,
		       const int mc,
		       u64 *offset,
		       u64 *limit,
		       int *ways)
{
	u32 reg_limit_lo, reg_offset_lo, reg_hi;
	struct pci_dev *pci_mc;
	int way_id;

	switch (mc) {
	case 0:
		pci_mc = pvt->knl.pci_mc0;
		break;
	case 1:
		pci_mc = pvt->knl.pci_mc1;
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	pci_read_config_dword(pci_mc,
			knl_tad_dram_limit_lo[entry], &reg_limit_lo);
	pci_read_config_dword(pci_mc,
			knl_tad_dram_offset_lo[entry], &reg_offset_lo);
	pci_read_config_dword(pci_mc,
			knl_tad_dram_hi[entry], &reg_hi);

	/* Is this TAD entry enabled? */
	if (!GET_BITFIELD(reg_limit_lo, 0, 0))
		return -ENODEV;

	way_id = GET_BITFIELD(reg_limit_lo, 3, 5);

	if (way_id < ARRAY_SIZE(knl_tad_ways)) {
		*ways = knl_tad_ways[way_id];
	} else {
		*ways = 0;
		sbridge_printk(KERN_ERR,
				"Unexpected value %d in mc_tad_limit_lo wayness field\n",
				way_id);
		return -ENODEV;
	}

	/*
	 * The least significant 6 bits of base and limit are truncated.
	 * For limit, we fill the missing bits with 1s.
	 */
	*offset = ((u64) GET_BITFIELD(reg_offset_lo, 6, 31) << 6) |
				((u64) GET_BITFIELD(reg_hi, 0, 15) << 32);
	*limit = ((u64) GET_BITFIELD(reg_limit_lo, 6, 31) << 6) | 63 |
				((u64) GET_BITFIELD(reg_hi, 16, 31) << 32);

	return 0;
}

/* Determine which memory controller is responsible for a given channel. */
static int knl_channel_mc(int channel)
{
	WARN_ON(channel < 0 || channel >= 6);

	return channel < 3 ? 1 : 0;
}

/*
 * Get the Nth entry from EDC_ROUTE_TABLE register.
 * (This is the per-tile mapping of logical interleave targets to
 *  physical EDC modules.)
 *
 * entry 0: 0:2
 *       1: 3:5
 *       2: 6:8
 *       3: 9:11
 *       4: 12:14
 *       5: 15:17
 *       6: 18:20
 *       7: 21:23
 * reserved: 24:31
 */
static u32 knl_get_edc_route(int entry, u32 reg)
{
	WARN_ON(entry >= KNL_MAX_EDCS);
	return GET_BITFIELD(reg, entry*3, (entry*3)+2);
}

/*
 * Get the Nth entry from MC_ROUTE_TABLE register.
 * (This is the per-tile mapping of logical interleave targets to
 *  physical DRAM channels.)
 *
 * entry 0: mc 0:2   channel 18:19
 *       1: mc 3:5   channel 20:21
 *       2: mc 6:8   channel 22:23
 *       3: mc 9:11  channel 24:25
 *       4: mc 12:14 channel 26:27
 *       5: mc 15:17 channel 28:29
 * reserved: 30:31
 *
 * Though we have 3 bits to identify the MC, we should only see
 * the values 0 or 1.
 */
static u32 knl_get_mc_route(int entry, u32 reg)
{
	int mc, chan;

	WARN_ON(entry >= KNL_MAX_CHANNELS);

	mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
	chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1);

	return knl_channel_remap(mc, chan);
}

/*
 * Render the EDC_ROUTE register in human-readable form.
 * Output string s should be at least KNL_MAX_EDCS*2 bytes.
 */
static void knl_show_edc_route(u32 reg, char *s)
{
	int i;

	for (i = 0; i < KNL_MAX_EDCS; i++) {
		s[i*2] = knl_get_edc_route(i, reg) + '0';
		s[i*2+1] = '-';
	}

	s[KNL_MAX_EDCS*2 - 1] = '\0';
}
/*
 * Render the MC_ROUTE register in human-readable form.
 * Output string s should be at least KNL_MAX_CHANNELS*2 bytes.
 */
static void knl_show_mc_route(u32 reg, char *s)
{
	int i;

	for (i = 0; i < KNL_MAX_CHANNELS; i++) {
		s[i*2] = knl_get_mc_route(i, reg) + '0';
		s[i*2+1] = '-';
	}

	s[KNL_MAX_CHANNELS*2 - 1] = '\0';
}

#define KNL_EDC_ROUTE 0xb8
#define KNL_MC_ROUTE 0xb4

/* Is this dram rule backed by regular DRAM in flat mode? */
#define KNL_EDRAM(reg) GET_BITFIELD(reg, 29, 29)

/* Is this dram rule cached? */
#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)

/* Is this rule backed by edc ? */
#define KNL_EDRAM_ONLY(reg) GET_BITFIELD(reg, 29, 29)

/* Is this rule backed by DRAM, cacheable in EDRAM? */
#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)

/* Is this rule mod3? */
#define KNL_MOD3(reg) GET_BITFIELD(reg, 27, 27)

/*
 * Figure out how big our RAM modules are.
 *
 * The DIMMMTR register in KNL doesn't tell us the size of the DIMMs, so we
 * have to figure this out from the SAD rules, interleave lists, route tables,
 * and TAD rules.
 *
 * SAD rules can have holes in them (e.g. the 3G-4G hole), so we have to
 * inspect the TAD rules to figure out how large the SAD regions really are.
 *
 * When we know the real size of a SAD region and how many ways it's
 * interleaved, we know the individual contribution of each channel to
 * TAD is size/ways.
 *
 * Finally, we have to check whether each channel participates in each SAD
 * region.
 *
 * Fortunately, KNL only supports one DIMM per channel, so once we know how
 * much memory the channel uses, we know the DIMM is at least that large.
 * (The BIOS might possibly choose not to map all available memory, in which
 * case we will underreport the size of the DIMM.)
 *
 * In theory, we could try to determine the EDC sizes as well, but that would
 * only work in flat mode, not in cache mode.
 *
 * @mc_sizes: Output sizes of channels (must have space for KNL_MAX_CHANNELS
 *            elements)
 */
static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
{
	u64 sad_base, sad_size, sad_limit = 0;
	u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace;
	int sad_rule = 0;
	int tad_rule = 0;
	int intrlv_ways, tad_ways;
	u32 first_pkg, pkg;
	int i;
	u64 sad_actual_size[2]; /* sad size accounting for holes, per mc */
	u32 dram_rule, interleave_reg;
	u32 mc_route_reg[KNL_MAX_CHAS];
	u32 edc_route_reg[KNL_MAX_CHAS];
	int edram_only;
	char edc_route_string[KNL_MAX_EDCS*2];
	char mc_route_string[KNL_MAX_CHANNELS*2];
	int cur_reg_start;
	int mc;
	int channel;
	int participants[KNL_MAX_CHANNELS];

	for (i = 0; i < KNL_MAX_CHANNELS; i++)
		mc_sizes[i] = 0;
	/* Read the EDC route table in each CHA. */
	cur_reg_start = 0;
	for (i = 0; i < KNL_MAX_CHAS; i++) {
		pci_read_config_dword(pvt->knl.pci_cha[i],
				KNL_EDC_ROUTE, &edc_route_reg[i]);

		if (i > 0 && edc_route_reg[i] != edc_route_reg[i-1]) {
			knl_show_edc_route(edc_route_reg[i-1],
					edc_route_string);
			if (cur_reg_start == i-1)
				edac_dbg(0, "edc route table for CHA %d: %s\n",
					cur_reg_start, edc_route_string);
			else
				edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
					cur_reg_start, i-1, edc_route_string);
			cur_reg_start = i;
		}
	}
	knl_show_edc_route(edc_route_reg[i-1], edc_route_string);
	if (cur_reg_start == i-1)
		edac_dbg(0, "edc route table for CHA %d: %s\n",
			cur_reg_start, edc_route_string);
	else
		edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
			cur_reg_start, i-1, edc_route_string);

	/* Read the MC route table in each CHA. */
	cur_reg_start = 0;
	for (i = 0; i < KNL_MAX_CHAS; i++) {
		pci_read_config_dword(pvt->knl.pci_cha[i],
			KNL_MC_ROUTE, &mc_route_reg[i]);

		if (i > 0 && mc_route_reg[i] != mc_route_reg[i-1]) {
			knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
			if (cur_reg_start == i-1)
				edac_dbg(0, "mc route table for CHA %d: %s\n",
					cur_reg_start, mc_route_string);
			else
				edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
					cur_reg_start, i-1, mc_route_string);
			cur_reg_start = i;
		}
	}
	knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
	if (cur_reg_start == i-1)
		edac_dbg(0, "mc route table for CHA %d: %s\n",
			cur_reg_start, mc_route_string);
	else
		edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
			cur_reg_start, i-1, mc_route_string);

	/* Process DRAM rules */
	for (sad_rule = 0; sad_rule < pvt->info.max_sad; sad_rule++) {
		/* previous limit becomes the new base */
		sad_base = sad_limit;

		pci_read_config_dword(pvt->pci_sad0,
			pvt->info.dram_rule[sad_rule], &dram_rule);

		if (!DRAM_RULE_ENABLE(dram_rule))
			break;

		edram_only = KNL_EDRAM_ONLY(dram_rule);

		sad_limit = pvt->info.sad_limit(dram_rule)+1;
		sad_size = sad_limit - sad_base;

		pci_read_config_dword(pvt->pci_sad0,
			pvt->info.interleave_list[sad_rule], &interleave_reg);

		/*
		 * Find out how many ways this dram rule is interleaved.
		 * We stop when we see the first channel again.
		 */
		first_pkg = sad_pkg(pvt->info.interleave_pkg,
						interleave_reg, 0);
		for (intrlv_ways = 1; intrlv_ways < 8; intrlv_ways++) {
			pkg = sad_pkg(pvt->info.interleave_pkg,
						interleave_reg, intrlv_ways);

			if ((pkg & 0x8) == 0) {
				/*
				 * 0 bit means memory is non-local,
				 * which KNL doesn't support
				 */
				edac_dbg(0, "Unexpected interleave target %d\n",
					pkg);
				return -1;
			}

			if (pkg == first_pkg)
				break;
		}
		if (KNL_MOD3(dram_rule))
			intrlv_ways *= 3;

		edac_dbg(3, "dram rule %d (base 0x%llx, limit 0x%llx), %d way interleave%s\n",
			sad_rule,
			sad_base,
			sad_limit,
			intrlv_ways,
			edram_only ? ", EDRAM" : "");
		/*
		 * Find out how big the SAD region really is by iterating
		 * over TAD tables (SAD regions may contain holes).
		 * Each memory controller might have a different TAD table, so
		 * we have to look at both.
		 *
		 * Livespace is the memory that's mapped in this TAD table,
		 * deadspace is the holes (this could be the MMIO hole, or it
		 * could be memory that's mapped by the other TAD table but
		 * not this one).
		 */
		for (mc = 0; mc < 2; mc++) {
			sad_actual_size[mc] = 0;
			tad_livespace = 0;
			for (tad_rule = 0;
					tad_rule < ARRAY_SIZE(
						knl_tad_dram_limit_lo);
					tad_rule++) {
				if (knl_get_tad(pvt,
						tad_rule,
						mc,
						&tad_deadspace,
						&tad_limit,
						&tad_ways))
					break;

				tad_size = (tad_limit+1) -
					(tad_livespace + tad_deadspace);
				tad_livespace += tad_size;
				tad_base = (tad_limit+1) - tad_size;

				if (tad_base < sad_base) {
					if (tad_limit > sad_base)
						edac_dbg(0, "TAD region overlaps lower SAD boundary -- TAD tables may be configured incorrectly.\n");
				} else if (tad_base < sad_limit) {
					if (tad_limit+1 > sad_limit) {
						edac_dbg(0, "TAD region overlaps upper SAD boundary -- TAD tables may be configured incorrectly.\n");
					} else {
						/* TAD region is completely inside SAD region */
						edac_dbg(3, "TAD region %d 0x%llx - 0x%llx (%lld bytes) table%d\n",
							tad_rule, tad_base,
							tad_limit, tad_size,
							mc);
						sad_actual_size[mc] += tad_size;
					}
				}
				tad_base = tad_limit+1;
			}
		}

		for (mc = 0; mc < 2; mc++) {
			edac_dbg(3, " total TAD DRAM footprint in table%d : 0x%llx (%lld bytes)\n",
				mc, sad_actual_size[mc], sad_actual_size[mc]);
		}

		/* Ignore EDRAM rule */
		if (edram_only)
			continue;

		/* Figure out which channels participate in interleave. */
		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++)
			participants[channel] = 0;

		/* For each channel, does at least one CHA have
		 * this channel mapped to the given target?
		 */
		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
			int target;
			int cha;

			for (target = 0; target < KNL_MAX_CHANNELS; target++) {
				for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
					if (knl_get_mc_route(target,
						mc_route_reg[cha]) == channel
						&& !participants[channel]) {
						participants[channel] = 1;
						break;
					}
				}
			}
		}

		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
			mc = knl_channel_mc(channel);
			if (participants[channel]) {
				edac_dbg(4, "mc channel %d contributes %lld bytes via sad entry %d\n",
					channel,
					sad_actual_size[mc]/intrlv_ways,
					sad_rule);
				mc_sizes[channel] +=
					sad_actual_size[mc]/intrlv_ways;
			}
		}
	}

	return 0;
}

static void get_source_id(struct mem_ctl_info *mci)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	u32 reg;

	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
	    pvt->info.type == KNIGHTS_LANDING)
		pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
	else
		pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);

	if (pvt->info.type == KNIGHTS_LANDING)
		pvt->sbridge_dev->source_id = SOURCE_ID_KNL(reg);
	else
		pvt->sbridge_dev->source_id = SOURCE_ID(reg);
}
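/*
 * Walk every channel and DIMM slot, decode the per-DIMM MTR register and
 * fill in the EDAC dimm_info entries (size, type, width, label).
 */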
static int __populate_dimms(struct mem_ctl_info *mci,
			    u64 knl_mc_sizes[KNL_MAX_CHANNELS],
			    enum edac_type mode)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	int channels = pvt->info.type == KNIGHTS_LANDING ? KNL_MAX_CHANNELS
							 : NUM_CHANNELS;
	unsigned int i, j, banks, ranks, rows, cols, npages;
	struct dimm_info *dimm;
	enum mem_type mtype;
	u64 size;

	mtype = pvt->info.get_memory_type(pvt);
	if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4)
		edac_dbg(0, "Memory is registered\n");
	else if (mtype == MEM_UNKNOWN)
		edac_dbg(0, "Cannot determine memory type\n");
	else
		edac_dbg(0, "Memory is unregistered\n");

	if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
		banks = 16;
	else
		banks = 8;

	for (i = 0; i < channels; i++) {
		u32 mtr;

		int max_dimms_per_channel;

		if (pvt->info.type == KNIGHTS_LANDING) {
			max_dimms_per_channel = 1;
			if (!pvt->knl.pci_channel[i])
				continue;
		} else {
			max_dimms_per_channel = ARRAY_SIZE(mtr_regs);
			if (!pvt->pci_tad[i])
				continue;
		}

		for (j = 0; j < max_dimms_per_channel; j++) {
			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
			if (pvt->info.type == KNIGHTS_LANDING) {
				pci_read_config_dword(pvt->knl.pci_channel[i],
					knl_mtr_reg, &mtr);
			} else {
				pci_read_config_dword(pvt->pci_tad[i],
					mtr_regs[j], &mtr);
			}
			edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr);
			if (IS_DIMM_PRESENT(mtr)) {
				if (!IS_ECC_ENABLED(pvt->info.mcmtr)) {
					sbridge_printk(KERN_ERR, "CPU SrcID #%d, Ha #%d, Channel #%d has DIMMs, but ECC is disabled\n",
						       pvt->sbridge_dev->source_id,
						       pvt->sbridge_dev->dom, i);
					return -ENODEV;
				}
				pvt->channel[i].dimms++;

				ranks = numrank(pvt->info.type, mtr);

				if (pvt->info.type == KNIGHTS_LANDING) {
					/* For DDR4, this is fixed. */
					cols = 1 << 10;
					rows = knl_mc_sizes[i] /
						((u64) cols * ranks * banks * 8);
				} else {
					rows = numrow(mtr);
					cols = numcol(mtr);
				}

				size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
				npages = MiB_TO_PAGES(size);

				edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
					 pvt->sbridge_dev->mc, pvt->sbridge_dev->dom, i, j,
					 size, npages,
					 banks, ranks, rows, cols);

				dimm->nr_pages = npages;
				dimm->grain = 32;
				dimm->dtype = pvt->info.get_width(pvt, mtr);
				dimm->mtype = mtype;
				dimm->edac_mode = mode;
				snprintf(dimm->label, sizeof(dimm->label),
					 "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
					 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom, i, j);
			}
		}
	}

	return 0;
}

static int get_dimm_config(struct mem_ctl_info *mci)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	u64 knl_mc_sizes[KNL_MAX_CHANNELS];
	enum edac_type mode;
	u32 reg;

	pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
	edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
		 pvt->sbridge_dev->mc,
		 pvt->sbridge_dev->node_id,
		 pvt->sbridge_dev->source_id);

	/* KNL doesn't support mirroring or lockstep,
	 * and is always closed page
	 */
	if (pvt->info.type == KNIGHTS_LANDING) {
		mode = EDAC_S4ECD4ED;
		pvt->mirror_mode = NON_MIRRORING;
		pvt->is_cur_addr_mirrored = false;

		if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
			return -1;
		if (pci_read_config_dword(pvt->pci_ta, KNL_MCMTR, &pvt->info.mcmtr)) {
			edac_dbg(0, "Failed to read KNL_MCMTR register\n");
			return -ENODEV;
		}
	} else {
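		/*
		 * SB/IB/HSW/BDW: mirroring, lockstep and page mode come from
		 * the HA defeature, RASENABLES and MCMTR registers read below.
		 */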
		if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
			if (pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg)) {
				edac_dbg(0, "Failed to read HASWELL_HASYSDEFEATURE2 register\n");
				return -ENODEV;
			}
			pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
			if (GET_BITFIELD(reg, 28, 28)) {
				pvt->mirror_mode = ADDR_RANGE_MIRRORING;
				edac_dbg(0, "Address range partial memory mirroring is enabled\n");
				goto next;
			}
		}
		if (pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg)) {
			edac_dbg(0, "Failed to read RASENABLES register\n");
			return -ENODEV;
		}
		if (IS_MIRROR_ENABLED(reg)) {
			pvt->mirror_mode = FULL_MIRRORING;
			edac_dbg(0, "Full memory mirroring is enabled\n");
		} else {
			pvt->mirror_mode = NON_MIRRORING;
			edac_dbg(0, "Memory mirroring is disabled\n");
		}

next:
		if (pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr)) {
			edac_dbg(0, "Failed to read MCMTR register\n");
			return -ENODEV;
		}
		if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
			edac_dbg(0, "Lockstep is enabled\n");
			mode = EDAC_S8ECD8ED;
			pvt->is_lockstep = true;
		} else {
			edac_dbg(0, "Lockstep is disabled\n");
			mode = EDAC_S4ECD4ED;
			pvt->is_lockstep = false;
		}
		if (IS_CLOSE_PG(pvt->info.mcmtr)) {
			edac_dbg(0, "address map is on closed page mode\n");
			pvt->is_close_pg = true;
		} else {
			edac_dbg(0, "address map is on open page mode\n");
			pvt->is_close_pg = false;
		}
	}

	return __populate_dimms(mci, knl_mc_sizes, mode);
}

static void get_memory_layout(const struct mem_ctl_info *mci)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	int i, j, k, n_sads, n_tads, sad_interl;
	u32 reg;
	u64 limit, prv = 0;
	u64 tmp_mb;
	u32 gb, mb;
	u32 rir_way;

	/*
	 * Step 1) Get TOLM/TOHM ranges
	 */

	pvt->tolm = pvt->info.get_tolm(pvt);
	tmp_mb = (1 + pvt->tolm) >> 20;

	gb = div_u64_rem(tmp_mb, 1024, &mb);
	edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
		gb, (mb*1000)/1024, (u64)pvt->tolm);

	/* Address range is already 45:25 */
	pvt->tohm = pvt->info.get_tohm(pvt);
	tmp_mb = (1 + pvt->tohm) >> 20;

	gb = div_u64_rem(tmp_mb, 1024, &mb);
	edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
		gb, (mb*1000)/1024, (u64)pvt->tohm);

	/*
	 * Step 2) Get SAD range and SAD Interleave list
	 * TAD registers contain the interleave wayness. However, it
	 * seems simpler to just discover it indirectly, with the
	 * algorithm below.
	 */
	prv = 0;
	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
		/* SAD_LIMIT Address range is 45:26 */
		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
				      &reg);
		limit = pvt->info.sad_limit(reg);

		if (!DRAM_RULE_ENABLE(reg))
			continue;

		if (limit <= prv)
			break;

		tmp_mb = (limit + 1) >> 20;
		gb = div_u64_rem(tmp_mb, 1024, &mb);
		edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
			 n_sads,
			 show_dram_attr(pvt->info.dram_attr(reg)),
			 gb, (mb*1000)/1024,
			 ((u64)tmp_mb) << 20L,
			 get_intlv_mode_str(reg, pvt->info.type),
			 reg);
		prv = limit;

		pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
				      &reg);
		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
		for (j = 0; j < 8; j++) {
			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j);
			if (j > 0 && sad_interl == pkg)
				break;

			edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
				 n_sads, j, pkg);
		}
	}

	if (pvt->info.type == KNIGHTS_LANDING)
		return;

	/*
	 * Step 3) Get TAD range
	 */
	prv = 0;
	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
		pci_read_config_dword(pvt->pci_ha, tad_dram_rule[n_tads], &reg);
		limit = TAD_LIMIT(reg);
		if (limit <= prv)
			break;
		tmp_mb = (limit + 1) >> 20;

		gb = div_u64_rem(tmp_mb, 1024, &mb);
		edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
			 n_tads, gb, (mb*1000)/1024,
			 ((u64)tmp_mb) << 20L,
			 (u32)(1 << TAD_SOCK(reg)),
			 (u32)TAD_CH(reg) + 1,
			 (u32)TAD_TGT0(reg),
			 (u32)TAD_TGT1(reg),
			 (u32)TAD_TGT2(reg),
			 (u32)TAD_TGT3(reg),
			 reg);
		prv = limit;
	}

	/*
	 * Step 4) Get TAD offsets, per each channel
	 */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!pvt->channel[i].dimms)
			continue;
		for (j = 0; j < n_tads; j++) {
			pci_read_config_dword(pvt->pci_tad[i],
					      tad_ch_nilv_offset[j],
					      &reg);
			tmp_mb = TAD_OFFSET(reg) >> 20;
			gb = div_u64_rem(tmp_mb, 1024, &mb);
			edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
				 i, j,
				 gb, (mb*1000)/1024,
				 ((u64)tmp_mb) << 20L,
				 reg);
		}
	}

	/*
	 * Step 6) Get RIR Wayness/Limit, per each channel
	 */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!pvt->channel[i].dimms)
			continue;
		for (j = 0; j < MAX_RIR_RANGES; j++) {
			pci_read_config_dword(pvt->pci_tad[i],
					      rir_way_limit[j],
					      &reg);

			if (!IS_RIR_VALID(reg))
				continue;

			tmp_mb = pvt->info.rir_limit(reg) >> 20;
			rir_way = 1 << RIR_WAY(reg);
			gb = div_u64_rem(tmp_mb, 1024, &mb);
			edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
				 i, j,
				 gb, (mb*1000)/1024,
				 ((u64)tmp_mb) << 20L,
				 rir_way,
				 reg);

			for (k = 0; k < rir_way; k++) {
				pci_read_config_dword(pvt->pci_tad[i],
						      rir_offset[j][k],
						      &reg);
				tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;

				gb = div_u64_rem(tmp_mb, 1024, &mb);
				edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
					 i, j, k,
					 gb, (mb*1000)/1024,
					 ((u64)tmp_mb) << 20L,
					 (u32)RIR_RNK_TGT(pvt->info.type, reg),
					 reg);
			}
		}
	}
}
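/* Find the mem_ctl_info that was registered for a given node id and HA. */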
1883 struct sbridge_dev *sbridge_dev; 1884 1885 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { 1886 if (sbridge_dev->node_id == node_id && sbridge_dev->dom == ha) 1887 return sbridge_dev->mci; 1888 } 1889 return NULL; 1890 } 1891 1892 static int get_memory_error_data(struct mem_ctl_info *mci, 1893 u64 addr, 1894 u8 *socket, u8 *ha, 1895 long *channel_mask, 1896 u8 *rank, 1897 char **area_type, char *msg) 1898 { 1899 struct mem_ctl_info *new_mci; 1900 struct sbridge_pvt *pvt = mci->pvt_info; 1901 struct pci_dev *pci_ha; 1902 int n_rir, n_sads, n_tads, sad_way, sck_xch; 1903 int sad_interl, idx, base_ch; 1904 int interleave_mode, shiftup = 0; 1905 unsigned int sad_interleave[MAX_INTERLEAVE]; 1906 u32 reg, dram_rule; 1907 u8 ch_way, sck_way, pkg, sad_ha = 0; 1908 u32 tad_offset; 1909 u32 rir_way; 1910 u32 mb, gb; 1911 u64 ch_addr, offset, limit = 0, prv = 0; 1912 1913 1914 /* 1915 * Step 0) Check if the address is in a special memory range 1916 * The check below is probably enough to cover all cases where 1917 * the error is not inside memory, except for the legacy 1918 * range (e.g. VGA addresses). It is unlikely, however, that the 1919 * memory controller would generate an error in that range. 1920 */ 1921 if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) { 1922 sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr); 1923 return -EINVAL; 1924 } 1925 if (addr >= (u64)pvt->tohm) { 1926 sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr); 1927 return -EINVAL; 1928 } 1929 1930 /* 1931 * Step 1) Get socket 1932 */ 1933 for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) { 1934 pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads], 1935 &reg); 1936 1937 if (!DRAM_RULE_ENABLE(reg)) 1938 continue; 1939 1940 limit = pvt->info.sad_limit(reg); 1941 if (limit <= prv) { 1942 sprintf(msg, "Can't discover the memory socket"); 1943 return -EINVAL; 1944 } 1945 if (addr <= limit) 1946 break; 1947 prv = limit; 1948 } 1949 if (n_sads == pvt->info.max_sad) { 1950 sprintf(msg, "Can't discover the memory socket"); 1951 return -EINVAL; 1952 } 1953 dram_rule = reg; 1954 *area_type = show_dram_attr(pvt->info.dram_attr(dram_rule)); 1955 interleave_mode = pvt->info.interleave_mode(dram_rule); 1956 1957 pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads], 1958 &reg); 1959 1960 if (pvt->info.type == SANDY_BRIDGE) { 1961 sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0); 1962 for (sad_way = 0; sad_way < 8; sad_way++) { 1963 u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way); 1964 if (sad_way > 0 && sad_interl == pkg) 1965 break; 1966 sad_interleave[sad_way] = pkg; 1967 edac_dbg(0, "SAD interleave #%d: %d\n", 1968 sad_way, sad_interleave[sad_way]); 1969 } 1970 edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n", 1971 pvt->sbridge_dev->mc, 1972 n_sads, 1973 addr, 1974 limit, 1975 sad_way + 7, 1976 !interleave_mode ?
"" : "XOR[18:16]"); 1977 if (interleave_mode) 1978 idx = ((addr >> 6) ^ (addr >> 16)) & 7; 1979 else 1980 idx = (addr >> 6) & 7; 1981 switch (sad_way) { 1982 case 1: 1983 idx = 0; 1984 break; 1985 case 2: 1986 idx = idx & 1; 1987 break; 1988 case 4: 1989 idx = idx & 3; 1990 break; 1991 case 8: 1992 break; 1993 default: 1994 sprintf(msg, "Can't discover socket interleave"); 1995 return -EINVAL; 1996 } 1997 *socket = sad_interleave[idx]; 1998 edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n", 1999 idx, sad_way, *socket); 2000 } else if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) { 2001 int bits, a7mode = A7MODE(dram_rule); 2002 2003 if (a7mode) { 2004 /* A7 mode swaps P9 with P6 */ 2005 bits = GET_BITFIELD(addr, 7, 8) << 1; 2006 bits |= GET_BITFIELD(addr, 9, 9); 2007 } else 2008 bits = GET_BITFIELD(addr, 6, 8); 2009 2010 if (interleave_mode == 0) { 2011 /* interleave mode will XOR {8,7,6} with {18,17,16} */ 2012 idx = GET_BITFIELD(addr, 16, 18); 2013 idx ^= bits; 2014 } else 2015 idx = bits; 2016 2017 pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx); 2018 *socket = sad_pkg_socket(pkg); 2019 sad_ha = sad_pkg_ha(pkg); 2020 2021 if (a7mode) { 2022 /* MCChanShiftUpEnable */ 2023 pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, ®); 2024 shiftup = GET_BITFIELD(reg, 22, 22); 2025 } 2026 2027 edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %i, shiftup: %i\n", 2028 idx, *socket, sad_ha, shiftup); 2029 } else { 2030 /* Ivy Bridge's SAD mode doesn't support XOR interleave mode */ 2031 idx = (addr >> 6) & 7; 2032 pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx); 2033 *socket = sad_pkg_socket(pkg); 2034 sad_ha = sad_pkg_ha(pkg); 2035 edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n", 2036 idx, *socket, sad_ha); 2037 } 2038 2039 *ha = sad_ha; 2040 2041 /* 2042 * Move to the proper node structure, in order to access the 2043 * right PCI registers 2044 */ 2045 new_mci = get_mci_for_node_id(*socket, sad_ha); 2046 if (!new_mci) { 2047 sprintf(msg, "Struct for socket #%u wasn't initialized", 2048 *socket); 2049 return -EINVAL; 2050 } 2051 mci = new_mci; 2052 pvt = mci->pvt_info; 2053 2054 /* 2055 * Step 2) Get memory channel 2056 */ 2057 prv = 0; 2058 pci_ha = pvt->pci_ha; 2059 for (n_tads = 0; n_tads < MAX_TAD; n_tads++) { 2060 pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], ®); 2061 limit = TAD_LIMIT(reg); 2062 if (limit <= prv) { 2063 sprintf(msg, "Can't discover the memory channel"); 2064 return -EINVAL; 2065 } 2066 if (addr <= limit) 2067 break; 2068 prv = limit; 2069 } 2070 if (n_tads == MAX_TAD) { 2071 sprintf(msg, "Can't discover the memory channel"); 2072 return -EINVAL; 2073 } 2074 2075 ch_way = TAD_CH(reg) + 1; 2076 sck_way = TAD_SOCK(reg); 2077 2078 if (ch_way == 3) 2079 idx = addr >> 6; 2080 else { 2081 idx = (addr >> (6 + sck_way + shiftup)) & 0x3; 2082 if (pvt->is_chan_hash) 2083 idx = haswell_chan_hash(idx, addr); 2084 } 2085 idx = idx % ch_way; 2086 2087 /* 2088 * FIXME: Shouldn't we use CHN_IDX_OFFSET() here, when ch_way == 3 ??? 
2089 */ 2090 switch (idx) { 2091 case 0: 2092 base_ch = TAD_TGT0(reg); 2093 break; 2094 case 1: 2095 base_ch = TAD_TGT1(reg); 2096 break; 2097 case 2: 2098 base_ch = TAD_TGT2(reg); 2099 break; 2100 case 3: 2101 base_ch = TAD_TGT3(reg); 2102 break; 2103 default: 2104 sprintf(msg, "Can't discover the TAD target"); 2105 return -EINVAL; 2106 } 2107 *channel_mask = 1 << base_ch; 2108 2109 pci_read_config_dword(pvt->pci_tad[base_ch], tad_ch_nilv_offset[n_tads], &tad_offset); 2110 2111 if (pvt->mirror_mode == FULL_MIRRORING || 2112 (pvt->mirror_mode == ADDR_RANGE_MIRRORING && n_tads == 0)) { 2113 *channel_mask |= 1 << ((base_ch + 2) % 4); 2114 switch(ch_way) { 2115 case 2: 2116 case 4: 2117 sck_xch = (1 << sck_way) * (ch_way >> 1); 2118 break; 2119 default: 2120 sprintf(msg, "Invalid mirror set. Can't decode addr"); 2121 return -EINVAL; 2122 } 2123 2124 pvt->is_cur_addr_mirrored = true; 2125 } else { 2126 sck_xch = (1 << sck_way) * ch_way; 2127 pvt->is_cur_addr_mirrored = false; 2128 } 2129 2130 if (pvt->is_lockstep) 2131 *channel_mask |= 1 << ((base_ch + 1) % 4); 2132 2133 offset = TAD_OFFSET(tad_offset); 2134 2135 edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n", 2136 n_tads, 2137 addr, 2138 limit, 2139 sck_way, 2140 ch_way, 2141 offset, 2142 idx, 2143 base_ch, 2144 *channel_mask); 2145 2146 /* Calculate channel address */ 2147 /* Remove the TAD offset */ 2148 2149 if (offset > addr) { 2150 sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!", 2151 offset, addr); 2152 return -EINVAL; 2153 } 2154 2155 ch_addr = addr - offset; 2156 ch_addr >>= (6 + shiftup); 2157 ch_addr /= sck_xch; 2158 ch_addr <<= (6 + shiftup); 2159 ch_addr |= addr & ((1 << (6 + shiftup)) - 1); 2160 2161 /* 2162 * Step 3) Decode rank 2163 */ 2164 for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) { 2165 pci_read_config_dword(pvt->pci_tad[base_ch], rir_way_limit[n_rir], &reg); 2166 2167 if (!IS_RIR_VALID(reg)) 2168 continue; 2169 2170 limit = pvt->info.rir_limit(reg); 2171 gb = div_u64_rem(limit >> 20, 1024, &mb); 2172 edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n", 2173 n_rir, 2174 gb, (mb*1000)/1024, 2175 limit, 2176 1 << RIR_WAY(reg)); 2177 if (ch_addr <= limit) 2178 break; 2179 } 2180 if (n_rir == MAX_RIR_RANGES) { 2181 sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx", 2182 ch_addr); 2183 return -EINVAL; 2184 } 2185 rir_way = RIR_WAY(reg); 2186 2187 if (pvt->is_close_pg) 2188 idx = (ch_addr >> 6); 2189 else 2190 idx = (ch_addr >> 13); /* FIXME: Datasheet says to shift by 15 */ 2191 idx %= 1 << rir_way; 2192 2193 pci_read_config_dword(pvt->pci_tad[base_ch], rir_offset[n_rir][idx], &reg); 2194 *rank = RIR_RNK_TGT(pvt->info.type, reg); 2195 2196 edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n", 2197 n_rir, 2198 ch_addr, 2199 limit, 2200 rir_way, 2201 idx); 2202 2203 return 0; 2204 } 2205 2206 /**************************************************************************** 2207 Device initialization routines: put/get, init/exit 2208 ****************************************************************************/ 2209 2210 /* 2211 * sbridge_put_all_devices 'put' all the devices that we have 2212 * reserved via 'get' 2213 */ 2214 static void sbridge_put_devices(struct sbridge_dev *sbridge_dev) 2215 { 2216 int i; 2217 2218 edac_dbg(0, "\n"); 2219 for (i = 0; i < sbridge_dev->n_devs; i++) { 2220 struct pci_dev *pdev =
sbridge_dev->pdev[i]; 2221 if (!pdev) 2222 continue; 2223 edac_dbg(0, "Removing dev %02x:%02x.%d\n", 2224 pdev->bus->number, 2225 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 2226 pci_dev_put(pdev); 2227 } 2228 } 2229 2230 static void sbridge_put_all_devices(void) 2231 { 2232 struct sbridge_dev *sbridge_dev, *tmp; 2233 2234 list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) { 2235 sbridge_put_devices(sbridge_dev); 2236 free_sbridge_dev(sbridge_dev); 2237 } 2238 } 2239 2240 static int sbridge_get_onedevice(struct pci_dev **prev, 2241 u8 *num_mc, 2242 const struct pci_id_table *table, 2243 const unsigned devno, 2244 const int multi_bus) 2245 { 2246 struct sbridge_dev *sbridge_dev = NULL; 2247 const struct pci_id_descr *dev_descr = &table->descr[devno]; 2248 struct pci_dev *pdev = NULL; 2249 u8 bus = 0; 2250 int i = 0; 2251 2252 sbridge_printk(KERN_DEBUG, 2253 "Seeking for: PCI ID %04x:%04x\n", 2254 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 2255 2256 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 2257 dev_descr->dev_id, *prev); 2258 2259 if (!pdev) { 2260 if (*prev) { 2261 *prev = pdev; 2262 return 0; 2263 } 2264 2265 if (dev_descr->optional) 2266 return 0; 2267 2268 /* if the HA wasn't found */ 2269 if (devno == 0) 2270 return -ENODEV; 2271 2272 sbridge_printk(KERN_INFO, 2273 "Device not found: %04x:%04x\n", 2274 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 2275 2276 /* End of list, leave */ 2277 return -ENODEV; 2278 } 2279 bus = pdev->bus->number; 2280 2281 next_imc: 2282 sbridge_dev = get_sbridge_dev(bus, dev_descr->dom, multi_bus, sbridge_dev); 2283 if (!sbridge_dev) { 2284 /* If the HA1 wasn't found, don't create EDAC second memory controller */ 2285 if (dev_descr->dom == IMC1 && devno != 1) { 2286 edac_dbg(0, "Skip IMC1: %04x:%04x (since HA1 was absent)\n", 2287 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 2288 pci_dev_put(pdev); 2289 return 0; 2290 } 2291 2292 if (dev_descr->dom == SOCK) 2293 goto out_imc; 2294 2295 sbridge_dev = alloc_sbridge_dev(bus, dev_descr->dom, table); 2296 if (!sbridge_dev) { 2297 pci_dev_put(pdev); 2298 return -ENOMEM; 2299 } 2300 (*num_mc)++; 2301 } 2302 2303 if (sbridge_dev->pdev[sbridge_dev->i_devs]) { 2304 sbridge_printk(KERN_ERR, 2305 "Duplicated device for %04x:%04x\n", 2306 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 2307 pci_dev_put(pdev); 2308 return -ENODEV; 2309 } 2310 2311 sbridge_dev->pdev[sbridge_dev->i_devs++] = pdev; 2312 2313 /* pdev belongs to more than one IMC, do extra gets */ 2314 if (++i > 1) 2315 pci_dev_get(pdev); 2316 2317 if (dev_descr->dom == SOCK && i < table->n_imcs_per_sock) 2318 goto next_imc; 2319 2320 out_imc: 2321 /* Be sure that the device is enabled */ 2322 if (unlikely(pci_enable_device(pdev) < 0)) { 2323 sbridge_printk(KERN_ERR, 2324 "Couldn't enable %04x:%04x\n", 2325 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 2326 return -ENODEV; 2327 } 2328 2329 edac_dbg(0, "Detected %04x:%04x\n", 2330 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 2331 2332 /* 2333 * As stated on drivers/pci/search.c, the reference count for 2334 * @from is always decremented if it is not %NULL. So, as we need 2335 * to get all devices up to null, we need to do a get for the device 2336 */ 2337 pci_dev_get(pdev); 2338 2339 *prev = pdev; 2340 2341 return 0; 2342 } 2343 2344 /* 2345 * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's 2346 * devices we want to reference for this driver. 2347 * @num_mc: pointer to the memory controllers count, to be incremented in case 2348 * of success. 
2349 * @table: model specific table 2350 * 2351 * returns 0 in case of success or error code 2352 */ 2353 static int sbridge_get_all_devices(u8 *num_mc, 2354 const struct pci_id_table *table) 2355 { 2356 int i, rc; 2357 struct pci_dev *pdev = NULL; 2358 int allow_dups = 0; 2359 int multi_bus = 0; 2360 2361 if (table->type == KNIGHTS_LANDING) 2362 allow_dups = multi_bus = 1; 2363 while (table && table->descr) { 2364 for (i = 0; i < table->n_devs_per_sock; i++) { 2365 if (!allow_dups || i == 0 || 2366 table->descr[i].dev_id != 2367 table->descr[i-1].dev_id) { 2368 pdev = NULL; 2369 } 2370 do { 2371 rc = sbridge_get_onedevice(&pdev, num_mc, 2372 table, i, multi_bus); 2373 if (rc < 0) { 2374 if (i == 0) { 2375 i = table->n_devs_per_sock; 2376 break; 2377 } 2378 sbridge_put_all_devices(); 2379 return -ENODEV; 2380 } 2381 } while (pdev && !allow_dups); 2382 } 2383 table++; 2384 } 2385 2386 return 0; 2387 } 2388 2389 /* 2390 * Device IDs for {SBRIDGE,IBRIDGE,HASWELL,BROADWELL}_IMC_HA0_TAD0 are in 2391 * the format: XXXa. So we can convert from a device to the corresponding 2392 * channel like this 2393 */ 2394 #define TAD_DEV_TO_CHAN(dev) (((dev) & 0xf) - 0xa) 2395 2396 static int sbridge_mci_bind_devs(struct mem_ctl_info *mci, 2397 struct sbridge_dev *sbridge_dev) 2398 { 2399 struct sbridge_pvt *pvt = mci->pvt_info; 2400 struct pci_dev *pdev; 2401 u8 saw_chan_mask = 0; 2402 int i; 2403 2404 for (i = 0; i < sbridge_dev->n_devs; i++) { 2405 pdev = sbridge_dev->pdev[i]; 2406 if (!pdev) 2407 continue; 2408 2409 switch (pdev->device) { 2410 case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0: 2411 pvt->pci_sad0 = pdev; 2412 break; 2413 case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1: 2414 pvt->pci_sad1 = pdev; 2415 break; 2416 case PCI_DEVICE_ID_INTEL_SBRIDGE_BR: 2417 pvt->pci_br0 = pdev; 2418 break; 2419 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0: 2420 pvt->pci_ha = pdev; 2421 break; 2422 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA: 2423 pvt->pci_ta = pdev; 2424 break; 2425 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS: 2426 pvt->pci_ras = pdev; 2427 break; 2428 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0: 2429 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1: 2430 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2: 2431 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3: 2432 { 2433 int id = TAD_DEV_TO_CHAN(pdev->device); 2434 pvt->pci_tad[id] = pdev; 2435 saw_chan_mask |= 1 << id; 2436 } 2437 break; 2438 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO: 2439 pvt->pci_ddrio = pdev; 2440 break; 2441 default: 2442 goto error; 2443 } 2444 2445 edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n", 2446 pdev->vendor, pdev->device, 2447 sbridge_dev->bus, 2448 pdev); 2449 } 2450 2451 /* Check if everything were registered */ 2452 if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha || 2453 !pvt->pci_ras || !pvt->pci_ta) 2454 goto enodev; 2455 2456 if (saw_chan_mask != 0x0f) 2457 goto enodev; 2458 return 0; 2459 2460 enodev: 2461 sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); 2462 return -ENODEV; 2463 2464 error: 2465 sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n", 2466 PCI_VENDOR_ID_INTEL, pdev->device); 2467 return -EINVAL; 2468 } 2469 2470 static int ibridge_mci_bind_devs(struct mem_ctl_info *mci, 2471 struct sbridge_dev *sbridge_dev) 2472 { 2473 struct sbridge_pvt *pvt = mci->pvt_info; 2474 struct pci_dev *pdev; 2475 u8 saw_chan_mask = 0; 2476 int i; 2477 2478 for (i = 0; i < sbridge_dev->n_devs; i++) { 2479 pdev = sbridge_dev->pdev[i]; 2480 if (!pdev) 2481 continue; 2482 2483 switch (pdev->device) { 2484 case 
PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0: 2485 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1: 2486 pvt->pci_ha = pdev; 2487 break; 2488 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA: 2489 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA: 2490 pvt->pci_ta = pdev; 2491 break; 2492 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS: 2493 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS: 2494 pvt->pci_ras = pdev; 2495 break; 2496 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0: 2497 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1: 2498 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2: 2499 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3: 2500 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0: 2501 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1: 2502 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2: 2503 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3: 2504 { 2505 int id = TAD_DEV_TO_CHAN(pdev->device); 2506 pvt->pci_tad[id] = pdev; 2507 saw_chan_mask |= 1 << id; 2508 } 2509 break; 2510 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0: 2511 pvt->pci_ddrio = pdev; 2512 break; 2513 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0: 2514 pvt->pci_ddrio = pdev; 2515 break; 2516 case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD: 2517 pvt->pci_sad0 = pdev; 2518 break; 2519 case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0: 2520 pvt->pci_br0 = pdev; 2521 break; 2522 case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1: 2523 pvt->pci_br1 = pdev; 2524 break; 2525 default: 2526 goto error; 2527 } 2528 2529 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n", 2530 sbridge_dev->bus, 2531 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), 2532 pdev); 2533 } 2534 2535 /* Check if everything were registered */ 2536 if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_br0 || 2537 !pvt->pci_br1 || !pvt->pci_ras || !pvt->pci_ta) 2538 goto enodev; 2539 2540 if (saw_chan_mask != 0x0f && /* -EN/-EX */ 2541 saw_chan_mask != 0x03) /* -EP */ 2542 goto enodev; 2543 return 0; 2544 2545 enodev: 2546 sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); 2547 return -ENODEV; 2548 2549 error: 2550 sbridge_printk(KERN_ERR, 2551 "Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL, 2552 pdev->device); 2553 return -EINVAL; 2554 } 2555 2556 static int haswell_mci_bind_devs(struct mem_ctl_info *mci, 2557 struct sbridge_dev *sbridge_dev) 2558 { 2559 struct sbridge_pvt *pvt = mci->pvt_info; 2560 struct pci_dev *pdev; 2561 u8 saw_chan_mask = 0; 2562 int i; 2563 2564 /* there's only one device per system; not tied to any bus */ 2565 if (pvt->info.pci_vtd == NULL) 2566 /* result will be checked later */ 2567 pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL, 2568 PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC, 2569 NULL); 2570 2571 for (i = 0; i < sbridge_dev->n_devs; i++) { 2572 pdev = sbridge_dev->pdev[i]; 2573 if (!pdev) 2574 continue; 2575 2576 switch (pdev->device) { 2577 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0: 2578 pvt->pci_sad0 = pdev; 2579 break; 2580 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1: 2581 pvt->pci_sad1 = pdev; 2582 break; 2583 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0: 2584 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1: 2585 pvt->pci_ha = pdev; 2586 break; 2587 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA: 2588 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA: 2589 pvt->pci_ta = pdev; 2590 break; 2591 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM: 2592 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM: 2593 pvt->pci_ras = pdev; 2594 break; 2595 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0: 2596 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1: 2597 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2: 2598 case 
PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3: 2599 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0: 2600 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1: 2601 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2: 2602 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3: 2603 { 2604 int id = TAD_DEV_TO_CHAN(pdev->device); 2605 pvt->pci_tad[id] = pdev; 2606 saw_chan_mask |= 1 << id; 2607 } 2608 break; 2609 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0: 2610 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1: 2611 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2: 2612 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3: 2613 if (!pvt->pci_ddrio) 2614 pvt->pci_ddrio = pdev; 2615 break; 2616 default: 2617 break; 2618 } 2619 2620 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n", 2621 sbridge_dev->bus, 2622 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), 2623 pdev); 2624 } 2625 2626 /* Check if everything were registered */ 2627 if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 || 2628 !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd) 2629 goto enodev; 2630 2631 if (saw_chan_mask != 0x0f && /* -EN/-EX */ 2632 saw_chan_mask != 0x03) /* -EP */ 2633 goto enodev; 2634 return 0; 2635 2636 enodev: 2637 sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); 2638 return -ENODEV; 2639 } 2640 2641 static int broadwell_mci_bind_devs(struct mem_ctl_info *mci, 2642 struct sbridge_dev *sbridge_dev) 2643 { 2644 struct sbridge_pvt *pvt = mci->pvt_info; 2645 struct pci_dev *pdev; 2646 u8 saw_chan_mask = 0; 2647 int i; 2648 2649 /* there's only one device per system; not tied to any bus */ 2650 if (pvt->info.pci_vtd == NULL) 2651 /* result will be checked later */ 2652 pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL, 2653 PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC, 2654 NULL); 2655 2656 for (i = 0; i < sbridge_dev->n_devs; i++) { 2657 pdev = sbridge_dev->pdev[i]; 2658 if (!pdev) 2659 continue; 2660 2661 switch (pdev->device) { 2662 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0: 2663 pvt->pci_sad0 = pdev; 2664 break; 2665 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1: 2666 pvt->pci_sad1 = pdev; 2667 break; 2668 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0: 2669 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1: 2670 pvt->pci_ha = pdev; 2671 break; 2672 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA: 2673 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA: 2674 pvt->pci_ta = pdev; 2675 break; 2676 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM: 2677 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM: 2678 pvt->pci_ras = pdev; 2679 break; 2680 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0: 2681 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1: 2682 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2: 2683 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3: 2684 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0: 2685 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1: 2686 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2: 2687 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3: 2688 { 2689 int id = TAD_DEV_TO_CHAN(pdev->device); 2690 pvt->pci_tad[id] = pdev; 2691 saw_chan_mask |= 1 << id; 2692 } 2693 break; 2694 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0: 2695 pvt->pci_ddrio = pdev; 2696 break; 2697 default: 2698 break; 2699 } 2700 2701 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n", 2702 sbridge_dev->bus, 2703 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), 2704 pdev); 2705 } 2706 2707 /* Check if everything were registered */ 2708 if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 || 2709 !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd) 2710 
goto enodev; 2711 2712 if (saw_chan_mask != 0x0f && /* -EN/-EX */ 2713 saw_chan_mask != 0x03) /* -EP */ 2714 goto enodev; 2715 return 0; 2716 2717 enodev: 2718 sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); 2719 return -ENODEV; 2720 } 2721 2722 static int knl_mci_bind_devs(struct mem_ctl_info *mci, 2723 struct sbridge_dev *sbridge_dev) 2724 { 2725 struct sbridge_pvt *pvt = mci->pvt_info; 2726 struct pci_dev *pdev; 2727 int dev, func; 2728 2729 int i; 2730 int devidx; 2731 2732 for (i = 0; i < sbridge_dev->n_devs; i++) { 2733 pdev = sbridge_dev->pdev[i]; 2734 if (!pdev) 2735 continue; 2736 2737 /* Extract PCI device and function. */ 2738 dev = (pdev->devfn >> 3) & 0x1f; 2739 func = pdev->devfn & 0x7; 2740 2741 switch (pdev->device) { 2742 case PCI_DEVICE_ID_INTEL_KNL_IMC_MC: 2743 if (dev == 8) 2744 pvt->knl.pci_mc0 = pdev; 2745 else if (dev == 9) 2746 pvt->knl.pci_mc1 = pdev; 2747 else { 2748 sbridge_printk(KERN_ERR, 2749 "Memory controller in unexpected place! (dev %d, fn %d)\n", 2750 dev, func); 2751 continue; 2752 } 2753 break; 2754 2755 case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0: 2756 pvt->pci_sad0 = pdev; 2757 break; 2758 2759 case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1: 2760 pvt->pci_sad1 = pdev; 2761 break; 2762 2763 case PCI_DEVICE_ID_INTEL_KNL_IMC_CHA: 2764 /* There are one of these per tile, and range from 2765 * 1.14.0 to 1.18.5. 2766 */ 2767 devidx = ((dev-14)*8)+func; 2768 2769 if (devidx < 0 || devidx >= KNL_MAX_CHAS) { 2770 sbridge_printk(KERN_ERR, 2771 "Caching and Home Agent in unexpected place! (dev %d, fn %d)\n", 2772 dev, func); 2773 continue; 2774 } 2775 2776 WARN_ON(pvt->knl.pci_cha[devidx] != NULL); 2777 2778 pvt->knl.pci_cha[devidx] = pdev; 2779 break; 2780 2781 case PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN: 2782 devidx = -1; 2783 2784 /* 2785 * MC0 channels 0-2 are device 9 function 2-4, 2786 * MC1 channels 3-5 are device 8 function 2-4. 2787 */ 2788 2789 if (dev == 9) 2790 devidx = func-2; 2791 else if (dev == 8) 2792 devidx = 3 + (func-2); 2793 2794 if (devidx < 0 || devidx >= KNL_MAX_CHANNELS) { 2795 sbridge_printk(KERN_ERR, 2796 "DRAM Channel Registers in unexpected place! (dev %d, fn %d)\n", 2797 dev, func); 2798 continue; 2799 } 2800 2801 WARN_ON(pvt->knl.pci_channel[devidx] != NULL); 2802 pvt->knl.pci_channel[devidx] = pdev; 2803 break; 2804 2805 case PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM: 2806 pvt->knl.pci_mc_info = pdev; 2807 break; 2808 2809 case PCI_DEVICE_ID_INTEL_KNL_IMC_TA: 2810 pvt->pci_ta = pdev; 2811 break; 2812 2813 default: 2814 sbridge_printk(KERN_ERR, "Unexpected device %d\n", 2815 pdev->device); 2816 break; 2817 } 2818 } 2819 2820 if (!pvt->knl.pci_mc0 || !pvt->knl.pci_mc1 || 2821 !pvt->pci_sad0 || !pvt->pci_sad1 || 2822 !pvt->pci_ta) { 2823 goto enodev; 2824 } 2825 2826 for (i = 0; i < KNL_MAX_CHANNELS; i++) { 2827 if (!pvt->knl.pci_channel[i]) { 2828 sbridge_printk(KERN_ERR, "Missing channel %d\n", i); 2829 goto enodev; 2830 } 2831 } 2832 2833 for (i = 0; i < KNL_MAX_CHAS; i++) { 2834 if (!pvt->knl.pci_cha[i]) { 2835 sbridge_printk(KERN_ERR, "Missing CHA %d\n", i); 2836 goto enodev; 2837 } 2838 } 2839 2840 return 0; 2841 2842 enodev: 2843 sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); 2844 return -ENODEV; 2845 } 2846 2847 /**************************************************************************** 2848 Error check routines 2849 ****************************************************************************/ 2850 2851 /* 2852 * While Sandy Bridge has error count registers, SMI BIOS read values from 2853 * and resets the counters. 
They are therefore not reliable for the OS to read, 2854 * so we have no option but to trust whatever the MCE is 2855 * telling us about the errors. 2856 */ 2857 static void sbridge_mce_output_error(struct mem_ctl_info *mci, 2858 const struct mce *m) 2859 { 2860 struct mem_ctl_info *new_mci; 2861 struct sbridge_pvt *pvt = mci->pvt_info; 2862 enum hw_event_mc_err_type tp_event; 2863 char *type, *optype, msg[256]; 2864 bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); 2865 bool overflow = GET_BITFIELD(m->status, 62, 62); 2866 bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); 2867 bool recoverable; 2868 u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52); 2869 u32 mscod = GET_BITFIELD(m->status, 16, 31); 2870 u32 errcode = GET_BITFIELD(m->status, 0, 15); 2871 u32 channel = GET_BITFIELD(m->status, 0, 3); 2872 u32 optypenum = GET_BITFIELD(m->status, 4, 6); 2873 long channel_mask, first_channel; 2874 u8 rank, socket, ha; 2875 int rc, dimm; 2876 char *area_type = NULL; 2877 2878 if (pvt->info.type != SANDY_BRIDGE) 2879 recoverable = true; 2880 else 2881 recoverable = GET_BITFIELD(m->status, 56, 56); 2882 2883 if (uncorrected_error) { 2884 if (ripv) { 2885 type = "FATAL"; 2886 tp_event = HW_EVENT_ERR_FATAL; 2887 } else { 2888 type = "NON_FATAL"; 2889 tp_event = HW_EVENT_ERR_UNCORRECTED; 2890 } 2891 } else { 2892 type = "CORRECTED"; 2893 tp_event = HW_EVENT_ERR_CORRECTED; 2894 } 2895 2896 /* 2897 * According to Table 15-9 of the Intel Architecture spec vol 3A, 2898 * memory errors should fit in this mask: 2899 * 000f 0000 1mmm cccc (binary) 2900 * where: 2901 * f = Correction Report Filtering Bit. If 1, subsequent errors 2902 * won't be shown 2903 * mmm = error type 2904 * cccc = channel 2905 * If the mask doesn't match, report an error to the parsing logic 2906 */ 2907 if (! ((errcode & 0xef80) == 0x80)) { 2908 optype = "Can't parse: it is not a mem"; 2909 } else { 2910 switch (optypenum) { 2911 case 0: 2912 optype = "generic undef request error"; 2913 break; 2914 case 1: 2915 optype = "memory read error"; 2916 break; 2917 case 2: 2918 optype = "memory write error"; 2919 break; 2920 case 3: 2921 optype = "addr/cmd error"; 2922 break; 2923 case 4: 2924 optype = "memory scrubbing error"; 2925 break; 2926 default: 2927 optype = "reserved"; 2928 break; 2929 } 2930 } 2931 2932 /* Only decode errors with a valid address (ADDRV) */ 2933 if (!GET_BITFIELD(m->status, 58, 58)) 2934 return; 2935 2936 if (pvt->info.type == KNIGHTS_LANDING) { 2937 if (channel == 14) { 2938 edac_dbg(0, "%s%s err_code:%04x:%04x EDRAM bank %d\n", 2939 overflow ? " OVERFLOW" : "", 2940 (uncorrected_error && recoverable) 2941 ? " recoverable" : "", 2942 mscod, errcode, 2943 m->bank); 2944 } else { 2945 char A = *("A"); 2946 2947 /* 2948 * Reported channel is in range 0-2, so we can't map it 2949 * back to the MC. To figure out which MC, we check the machine check 2950 * bank register that reported this error: 2951 * bank 15 means mc0 and bank 16 means mc1. 2952 */ 2953 channel = knl_channel_remap(m->bank == 16, channel); 2954 channel_mask = 1 << channel; 2955 2956 snprintf(msg, sizeof(msg), 2957 "%s%s err_code:%04x:%04x channel:%d (DIMM_%c)", 2958 overflow ? " OVERFLOW" : "", 2959 (uncorrected_error && recoverable) 2960 ?
" recoverable" : " ", 2961 mscod, errcode, channel, A + channel); 2962 edac_mc_handle_error(tp_event, mci, core_err_cnt, 2963 m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, 2964 channel, 0, -1, 2965 optype, msg); 2966 } 2967 return; 2968 } else { 2969 rc = get_memory_error_data(mci, m->addr, &socket, &ha, 2970 &channel_mask, &rank, &area_type, msg); 2971 } 2972 2973 if (rc < 0) 2974 goto err_parsing; 2975 new_mci = get_mci_for_node_id(socket, ha); 2976 if (!new_mci) { 2977 strcpy(msg, "Error: socket got corrupted!"); 2978 goto err_parsing; 2979 } 2980 mci = new_mci; 2981 pvt = mci->pvt_info; 2982 2983 first_channel = find_first_bit(&channel_mask, NUM_CHANNELS); 2984 2985 if (rank < 4) 2986 dimm = 0; 2987 else if (rank < 8) 2988 dimm = 1; 2989 else 2990 dimm = 2; 2991 2992 2993 /* 2994 * FIXME: On some memory configurations (mirror, lockstep), the 2995 * Memory Controller can't point the error to a single DIMM. The 2996 * EDAC core should be handling the channel mask, in order to point 2997 * to the group of dimm's where the error may be happening. 2998 */ 2999 if (!pvt->is_lockstep && !pvt->is_cur_addr_mirrored && !pvt->is_close_pg) 3000 channel = first_channel; 3001 3002 snprintf(msg, sizeof(msg), 3003 "%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d", 3004 overflow ? " OVERFLOW" : "", 3005 (uncorrected_error && recoverable) ? " recoverable" : "", 3006 area_type, 3007 mscod, errcode, 3008 socket, ha, 3009 channel_mask, 3010 rank); 3011 3012 edac_dbg(0, "%s\n", msg); 3013 3014 /* FIXME: need support for channel mask */ 3015 3016 if (channel == CHANNEL_UNSPECIFIED) 3017 channel = -1; 3018 3019 /* Call the helper to output message */ 3020 edac_mc_handle_error(tp_event, mci, core_err_cnt, 3021 m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, 3022 channel, dimm, -1, 3023 optype, msg); 3024 return; 3025 err_parsing: 3026 edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, 3027 -1, -1, -1, 3028 msg, ""); 3029 3030 } 3031 3032 /* 3033 * Check that logging is enabled and that this is the right type 3034 * of error for us to handle. 3035 */ 3036 static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val, 3037 void *data) 3038 { 3039 struct mce *mce = (struct mce *)data; 3040 struct mem_ctl_info *mci; 3041 struct sbridge_pvt *pvt; 3042 char *type; 3043 3044 if (edac_get_report_status() == EDAC_REPORTING_DISABLED) 3045 return NOTIFY_DONE; 3046 3047 mci = get_mci_for_node_id(mce->socketid, IMC0); 3048 if (!mci) 3049 return NOTIFY_DONE; 3050 pvt = mci->pvt_info; 3051 3052 /* 3053 * Just let mcelog handle it if the error is 3054 * outside the memory controller. A memory error 3055 * is indicated by bit 7 = 1 and bits = 8-11,13-15 = 0. 3056 * bit 12 has an special meaning. 
3057 */ 3058 if ((mce->status & 0xefff) >> 7 != 1) 3059 return NOTIFY_DONE; 3060 3061 if (mce->mcgstatus & MCG_STATUS_MCIP) 3062 type = "Exception"; 3063 else 3064 type = "Event"; 3065 3066 sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n"); 3067 3068 sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx " 3069 "Bank %d: %016Lx\n", mce->extcpu, type, 3070 mce->mcgstatus, mce->bank, mce->status); 3071 sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc); 3072 sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr); 3073 sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc); 3074 3075 sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET " 3076 "%u APIC %x\n", mce->cpuvendor, mce->cpuid, 3077 mce->time, mce->socketid, mce->apicid); 3078 3079 sbridge_mce_output_error(mci, mce); 3080 3081 /* Advice mcelog that the error were handled */ 3082 return NOTIFY_STOP; 3083 } 3084 3085 static struct notifier_block sbridge_mce_dec = { 3086 .notifier_call = sbridge_mce_check_error, 3087 .priority = MCE_PRIO_EDAC, 3088 }; 3089 3090 /**************************************************************************** 3091 EDAC register/unregister logic 3092 ****************************************************************************/ 3093 3094 static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev) 3095 { 3096 struct mem_ctl_info *mci = sbridge_dev->mci; 3097 struct sbridge_pvt *pvt; 3098 3099 if (unlikely(!mci || !mci->pvt_info)) { 3100 edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev); 3101 3102 sbridge_printk(KERN_ERR, "Couldn't find mci handler\n"); 3103 return; 3104 } 3105 3106 pvt = mci->pvt_info; 3107 3108 edac_dbg(0, "MC: mci = %p, dev = %p\n", 3109 mci, &sbridge_dev->pdev[0]->dev); 3110 3111 /* Remove MC sysfs nodes */ 3112 edac_mc_del_mc(mci->pdev); 3113 3114 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name); 3115 kfree(mci->ctl_name); 3116 edac_mc_free(mci); 3117 sbridge_dev->mci = NULL; 3118 } 3119 3120 static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) 3121 { 3122 struct mem_ctl_info *mci; 3123 struct edac_mc_layer layers[2]; 3124 struct sbridge_pvt *pvt; 3125 struct pci_dev *pdev = sbridge_dev->pdev[0]; 3126 int rc; 3127 3128 /* allocate a new MC control structure */ 3129 layers[0].type = EDAC_MC_LAYER_CHANNEL; 3130 layers[0].size = type == KNIGHTS_LANDING ? 3131 KNL_MAX_CHANNELS : NUM_CHANNELS; 3132 layers[0].is_virt_csrow = false; 3133 layers[1].type = EDAC_MC_LAYER_SLOT; 3134 layers[1].size = type == KNIGHTS_LANDING ? 1 : MAX_DIMMS; 3135 layers[1].is_virt_csrow = true; 3136 mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers, 3137 sizeof(*pvt)); 3138 3139 if (unlikely(!mci)) 3140 return -ENOMEM; 3141 3142 edac_dbg(0, "MC: mci = %p, dev = %p\n", 3143 mci, &pdev->dev); 3144 3145 pvt = mci->pvt_info; 3146 memset(pvt, 0, sizeof(*pvt)); 3147 3148 /* Associate sbridge_dev and mci for future usage */ 3149 pvt->sbridge_dev = sbridge_dev; 3150 sbridge_dev->mci = mci; 3151 3152 mci->mtype_cap = type == KNIGHTS_LANDING ? 
3153 MEM_FLAG_DDR4 : MEM_FLAG_DDR3; 3154 mci->edac_ctl_cap = EDAC_FLAG_NONE; 3155 mci->edac_cap = EDAC_FLAG_NONE; 3156 mci->mod_name = EDAC_MOD_STR; 3157 mci->dev_name = pci_name(pdev); 3158 mci->ctl_page_to_phys = NULL; 3159 3160 pvt->info.type = type; 3161 switch (type) { 3162 case IVY_BRIDGE: 3163 pvt->info.rankcfgr = IB_RANK_CFG_A; 3164 pvt->info.get_tolm = ibridge_get_tolm; 3165 pvt->info.get_tohm = ibridge_get_tohm; 3166 pvt->info.dram_rule = ibridge_dram_rule; 3167 pvt->info.get_memory_type = get_memory_type; 3168 pvt->info.get_node_id = get_node_id; 3169 pvt->info.rir_limit = rir_limit; 3170 pvt->info.sad_limit = sad_limit; 3171 pvt->info.interleave_mode = interleave_mode; 3172 pvt->info.dram_attr = dram_attr; 3173 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule); 3174 pvt->info.interleave_list = ibridge_interleave_list; 3175 pvt->info.interleave_pkg = ibridge_interleave_pkg; 3176 pvt->info.get_width = ibridge_get_width; 3177 3178 /* Store pci devices at mci for faster access */ 3179 rc = ibridge_mci_bind_devs(mci, sbridge_dev); 3180 if (unlikely(rc < 0)) 3181 goto fail0; 3182 get_source_id(mci); 3183 mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge SrcID#%d_Ha#%d", 3184 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom); 3185 break; 3186 case SANDY_BRIDGE: 3187 pvt->info.rankcfgr = SB_RANK_CFG_A; 3188 pvt->info.get_tolm = sbridge_get_tolm; 3189 pvt->info.get_tohm = sbridge_get_tohm; 3190 pvt->info.dram_rule = sbridge_dram_rule; 3191 pvt->info.get_memory_type = get_memory_type; 3192 pvt->info.get_node_id = get_node_id; 3193 pvt->info.rir_limit = rir_limit; 3194 pvt->info.sad_limit = sad_limit; 3195 pvt->info.interleave_mode = interleave_mode; 3196 pvt->info.dram_attr = dram_attr; 3197 pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule); 3198 pvt->info.interleave_list = sbridge_interleave_list; 3199 pvt->info.interleave_pkg = sbridge_interleave_pkg; 3200 pvt->info.get_width = sbridge_get_width; 3201 3202 /* Store pci devices at mci for faster access */ 3203 rc = sbridge_mci_bind_devs(mci, sbridge_dev); 3204 if (unlikely(rc < 0)) 3205 goto fail0; 3206 get_source_id(mci); 3207 mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge SrcID#%d_Ha#%d", 3208 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom); 3209 break; 3210 case HASWELL: 3211 /* rankcfgr isn't used */ 3212 pvt->info.get_tolm = haswell_get_tolm; 3213 pvt->info.get_tohm = haswell_get_tohm; 3214 pvt->info.dram_rule = ibridge_dram_rule; 3215 pvt->info.get_memory_type = haswell_get_memory_type; 3216 pvt->info.get_node_id = haswell_get_node_id; 3217 pvt->info.rir_limit = haswell_rir_limit; 3218 pvt->info.sad_limit = sad_limit; 3219 pvt->info.interleave_mode = interleave_mode; 3220 pvt->info.dram_attr = dram_attr; 3221 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule); 3222 pvt->info.interleave_list = ibridge_interleave_list; 3223 pvt->info.interleave_pkg = ibridge_interleave_pkg; 3224 pvt->info.get_width = ibridge_get_width; 3225 3226 /* Store pci devices at mci for faster access */ 3227 rc = haswell_mci_bind_devs(mci, sbridge_dev); 3228 if (unlikely(rc < 0)) 3229 goto fail0; 3230 get_source_id(mci); 3231 mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell SrcID#%d_Ha#%d", 3232 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom); 3233 break; 3234 case BROADWELL: 3235 /* rankcfgr isn't used */ 3236 pvt->info.get_tolm = haswell_get_tolm; 3237 pvt->info.get_tohm = haswell_get_tohm; 3238 pvt->info.dram_rule = ibridge_dram_rule; 3239 pvt->info.get_memory_type = haswell_get_memory_type; 3240 pvt->info.get_node_id = haswell_get_node_id; 
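		/* Same decode callbacks as the Haswell case above; only the
		 * width lookup, the device binding and the control name
		 * below differ. */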
3241 pvt->info.rir_limit = haswell_rir_limit; 3242 pvt->info.sad_limit = sad_limit; 3243 pvt->info.interleave_mode = interleave_mode; 3244 pvt->info.dram_attr = dram_attr; 3245 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule); 3246 pvt->info.interleave_list = ibridge_interleave_list; 3247 pvt->info.interleave_pkg = ibridge_interleave_pkg; 3248 pvt->info.get_width = broadwell_get_width; 3249 3250 /* Store pci devices at mci for faster access */ 3251 rc = broadwell_mci_bind_devs(mci, sbridge_dev); 3252 if (unlikely(rc < 0)) 3253 goto fail0; 3254 get_source_id(mci); 3255 mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell SrcID#%d_Ha#%d", 3256 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom); 3257 break; 3258 case KNIGHTS_LANDING: 3259 /* pvt->info.rankcfgr == ??? */ 3260 pvt->info.get_tolm = knl_get_tolm; 3261 pvt->info.get_tohm = knl_get_tohm; 3262 pvt->info.dram_rule = knl_dram_rule; 3263 pvt->info.get_memory_type = knl_get_memory_type; 3264 pvt->info.get_node_id = knl_get_node_id; 3265 pvt->info.rir_limit = NULL; 3266 pvt->info.sad_limit = knl_sad_limit; 3267 pvt->info.interleave_mode = knl_interleave_mode; 3268 pvt->info.dram_attr = dram_attr_knl; 3269 pvt->info.max_sad = ARRAY_SIZE(knl_dram_rule); 3270 pvt->info.interleave_list = knl_interleave_list; 3271 pvt->info.interleave_pkg = ibridge_interleave_pkg; 3272 pvt->info.get_width = knl_get_width; 3273 3274 rc = knl_mci_bind_devs(mci, sbridge_dev); 3275 if (unlikely(rc < 0)) 3276 goto fail0; 3277 get_source_id(mci); 3278 mci->ctl_name = kasprintf(GFP_KERNEL, "Knights Landing SrcID#%d_Ha#%d", 3279 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom); 3280 break; 3281 } 3282 3283 if (!mci->ctl_name) { 3284 rc = -ENOMEM; 3285 goto fail0; 3286 } 3287 3288 /* Get dimm basic config and the memory layout */ 3289 rc = get_dimm_config(mci); 3290 if (rc < 0) { 3291 edac_dbg(0, "MC: failed to get_dimm_config()\n"); 3292 goto fail; 3293 } 3294 get_memory_layout(mci); 3295 3296 /* record ptr to the generic device */ 3297 mci->pdev = &pdev->dev; 3298 3299 /* add this new MC control structure to EDAC's list of MCs */ 3300 if (unlikely(edac_mc_add_mc(mci))) { 3301 edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); 3302 rc = -EINVAL; 3303 goto fail; 3304 } 3305 3306 return 0; 3307 3308 fail: 3309 kfree(mci->ctl_name); 3310 fail0: 3311 edac_mc_free(mci); 3312 sbridge_dev->mci = NULL; 3313 return rc; 3314 } 3315 3316 #define ICPU(model, table) \ 3317 { X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table } 3318 3319 static const struct x86_cpu_id sbridge_cpuids[] = { 3320 ICPU(INTEL_FAM6_SANDYBRIDGE_X, pci_dev_descr_sbridge_table), 3321 ICPU(INTEL_FAM6_IVYBRIDGE_X, pci_dev_descr_ibridge_table), 3322 ICPU(INTEL_FAM6_HASWELL_X, pci_dev_descr_haswell_table), 3323 ICPU(INTEL_FAM6_BROADWELL_X, pci_dev_descr_broadwell_table), 3324 ICPU(INTEL_FAM6_BROADWELL_XEON_D, pci_dev_descr_broadwell_table), 3325 ICPU(INTEL_FAM6_XEON_PHI_KNL, pci_dev_descr_knl_table), 3326 ICPU(INTEL_FAM6_XEON_PHI_KNM, pci_dev_descr_knl_table), 3327 { } 3328 }; 3329 MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids); 3330 3331 /* 3332 * sbridge_probe Get all devices and register memory controllers 3333 * present. 
3334 * return: 3335 * 0 for FOUND a device 3336 * < 0 for error code 3337 */ 3338 3339 static int sbridge_probe(const struct x86_cpu_id *id) 3340 { 3341 int rc = -ENODEV; 3342 u8 mc, num_mc = 0; 3343 struct sbridge_dev *sbridge_dev; 3344 struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data; 3345 3346 /* get the pci devices we want to reserve for our use */ 3347 rc = sbridge_get_all_devices(&num_mc, ptable); 3348 3349 if (unlikely(rc < 0)) { 3350 edac_dbg(0, "couldn't get all devices\n"); 3351 goto fail0; 3352 } 3353 3354 mc = 0; 3355 3356 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { 3357 edac_dbg(0, "Registering MC#%d (%d of %d)\n", 3358 mc, mc + 1, num_mc); 3359 3360 sbridge_dev->mc = mc++; 3361 rc = sbridge_register_mci(sbridge_dev, ptable->type); 3362 if (unlikely(rc < 0)) 3363 goto fail1; 3364 } 3365 3366 sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION); 3367 3368 return 0; 3369 3370 fail1: 3371 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) 3372 sbridge_unregister_mci(sbridge_dev); 3373 3374 sbridge_put_all_devices(); 3375 fail0: 3376 return rc; 3377 } 3378 3379 /* 3380 * sbridge_remove cleanup 3381 * 3382 */ 3383 static void sbridge_remove(void) 3384 { 3385 struct sbridge_dev *sbridge_dev; 3386 3387 edac_dbg(0, "\n"); 3388 3389 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) 3390 sbridge_unregister_mci(sbridge_dev); 3391 3392 /* Release PCI resources */ 3393 sbridge_put_all_devices(); 3394 } 3395 3396 /* 3397 * sbridge_init Module entry function 3398 * Try to initialize this module for its devices 3399 */ 3400 static int __init sbridge_init(void) 3401 { 3402 const struct x86_cpu_id *id; 3403 const char *owner; 3404 int rc; 3405 3406 edac_dbg(2, "\n"); 3407 3408 owner = edac_get_owner(); 3409 if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR))) 3410 return -EBUSY; 3411 3412 id = x86_match_cpu(sbridge_cpuids); 3413 if (!id) 3414 return -ENODEV; 3415 3416 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 3417 opstate_init(); 3418 3419 rc = sbridge_probe(id); 3420 3421 if (rc >= 0) { 3422 mce_register_decode_chain(&sbridge_mce_dec); 3423 if (edac_get_report_status() == EDAC_REPORTING_DISABLED) 3424 sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n"); 3425 return 0; 3426 } 3427 3428 sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n", 3429 rc); 3430 3431 return rc; 3432 } 3433 3434 /* 3435 * sbridge_exit() Module exit function 3436 * Unregister the driver 3437 */ 3438 static void __exit sbridge_exit(void) 3439 { 3440 edac_dbg(2, "\n"); 3441 sbridge_remove(); 3442 mce_unregister_decode_chain(&sbridge_mce_dec); 3443 } 3444 3445 module_init(sbridge_init); 3446 module_exit(sbridge_exit); 3447 3448 module_param(edac_op_state, int, 0444); 3449 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 3450 3451 MODULE_LICENSE("GPL"); 3452 MODULE_AUTHOR("Mauro Carvalho Chehab"); 3453 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)"); 3454 MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - " 3455 SBRIDGE_REVISION); 3456
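/*
 * Illustrative sketch (not built): a stand-alone, user-space model of the
 * channel-address math used by get_memory_error_data() once the TAD offset
 * is known.  The shiftup and sck_xch values passed in main() are made-up
 * example numbers, not values read from any hardware.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint64_t channel_address(uint64_t sys_addr, uint64_t tad_offset,
				unsigned int shiftup, unsigned int sck_xch)
{
	uint64_t ch_addr = sys_addr - tad_offset;

	/*
	 * Drop the bits below the interleave granularity, divide out the
	 * socket * channel interleave factor, then restore the low bits,
	 * mirroring the ch_addr computation in get_memory_error_data().
	 */
	ch_addr >>= (6 + shiftup);
	ch_addr /= sck_xch;
	ch_addr <<= (6 + shiftup);
	ch_addr |= sys_addr & ((1ULL << (6 + shiftup)) - 1);

	return ch_addr;
}

int main(void)
{
	/* 2-way interleave, no shiftup, zero TAD offset, made-up address. */
	printf("ch_addr = 0x%llx\n",
	       (unsigned long long)channel_address(0x100001240ULL, 0, 0, 2));
	return 0;
}
#endif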