1 /* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module 2 * 3 * This driver supports the memory controllers found on the Intel 4 * processor family Sandy Bridge. 5 * 6 * This file may be distributed under the terms of the 7 * GNU General Public License version 2 only. 8 * 9 * Copyright (c) 2011 by: 10 * Mauro Carvalho Chehab 11 */ 12 13 #include <linux/module.h> 14 #include <linux/init.h> 15 #include <linux/pci.h> 16 #include <linux/pci_ids.h> 17 #include <linux/slab.h> 18 #include <linux/delay.h> 19 #include <linux/edac.h> 20 #include <linux/mmzone.h> 21 #include <linux/smp.h> 22 #include <linux/bitmap.h> 23 #include <linux/math64.h> 24 #include <asm/processor.h> 25 #include <asm/mce.h> 26 27 #include "edac_core.h" 28 29 /* Static vars */ 30 static LIST_HEAD(sbridge_edac_list); 31 static DEFINE_MUTEX(sbridge_edac_lock); 32 static int probed; 33 34 /* 35 * Alter this version for the module when modifications are made 36 */ 37 #define SBRIDGE_REVISION " Ver: 1.1.0 " 38 #define EDAC_MOD_STR "sbridge_edac" 39 40 /* 41 * Debug macros 42 */ 43 #define sbridge_printk(level, fmt, arg...) \ 44 edac_printk(level, "sbridge", fmt, ##arg) 45 46 #define sbridge_mc_printk(mci, level, fmt, arg...) \ 47 edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg) 48 49 /* 50 * Get a bit field at register value <v>, from bit <lo> to bit <hi> 51 */ 52 #define GET_BITFIELD(v, lo, hi) \ 53 (((v) & GENMASK_ULL(hi, lo)) >> (lo)) 54 55 /* Devices 12 Function 6, Offsets 0x80 to 0xcc */ 56 static const u32 sbridge_dram_rule[] = { 57 0x80, 0x88, 0x90, 0x98, 0xa0, 58 0xa8, 0xb0, 0xb8, 0xc0, 0xc8, 59 }; 60 61 static const u32 ibridge_dram_rule[] = { 62 0x60, 0x68, 0x70, 0x78, 0x80, 63 0x88, 0x90, 0x98, 0xa0, 0xa8, 64 0xb0, 0xb8, 0xc0, 0xc8, 0xd0, 65 0xd8, 0xe0, 0xe8, 0xf0, 0xf8, 66 }; 67 68 #define SAD_LIMIT(reg) ((GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff) 69 #define DRAM_ATTR(reg) GET_BITFIELD(reg, 2, 3) 70 #define INTERLEAVE_MODE(reg) GET_BITFIELD(reg, 1, 1) 71 #define DRAM_RULE_ENABLE(reg) GET_BITFIELD(reg, 0, 0) 72 #define A7MODE(reg) GET_BITFIELD(reg, 26, 26) 73 74 static char *get_dram_attr(u32 reg) 75 { 76 switch(DRAM_ATTR(reg)) { 77 case 0: 78 return "DRAM"; 79 case 1: 80 return "MMCFG"; 81 case 2: 82 return "NXM"; 83 default: 84 return "unknown"; 85 } 86 } 87 88 static const u32 sbridge_interleave_list[] = { 89 0x84, 0x8c, 0x94, 0x9c, 0xa4, 90 0xac, 0xb4, 0xbc, 0xc4, 0xcc, 91 }; 92 93 static const u32 ibridge_interleave_list[] = { 94 0x64, 0x6c, 0x74, 0x7c, 0x84, 95 0x8c, 0x94, 0x9c, 0xa4, 0xac, 96 0xb4, 0xbc, 0xc4, 0xcc, 0xd4, 97 0xdc, 0xe4, 0xec, 0xf4, 0xfc, 98 }; 99 100 struct interleave_pkg { 101 unsigned char start; 102 unsigned char end; 103 }; 104 105 static const struct interleave_pkg sbridge_interleave_pkg[] = { 106 { 0, 2 }, 107 { 3, 5 }, 108 { 8, 10 }, 109 { 11, 13 }, 110 { 16, 18 }, 111 { 19, 21 }, 112 { 24, 26 }, 113 { 27, 29 }, 114 }; 115 116 static const struct interleave_pkg ibridge_interleave_pkg[] = { 117 { 0, 3 }, 118 { 4, 7 }, 119 { 8, 11 }, 120 { 12, 15 }, 121 { 16, 19 }, 122 { 20, 23 }, 123 { 24, 27 }, 124 { 28, 31 }, 125 }; 126 127 static inline int sad_pkg(const struct interleave_pkg *table, u32 reg, 128 int interleave) 129 { 130 return GET_BITFIELD(reg, table[interleave].start, 131 table[interleave].end); 132 } 133 134 /* Devices 12 Function 7 */ 135 136 #define TOLM 0x80 137 #define TOHM 0x84 138 #define HASWELL_TOLM 0xd0 139 #define HASWELL_TOHM_0 0xd4 140 #define HASWELL_TOHM_1 0xd8 141 142 #define GET_TOLM(reg) ((GET_BITFIELD(reg, 0, 3) << 28) | 0x3ffffff) 143 #define GET_TOHM(reg) ((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff) 144 145 /* Device 13 Function 6 */ 146 147 #define SAD_TARGET 0xf0 148 149 #define SOURCE_ID(reg) GET_BITFIELD(reg, 9, 11) 150 151 #define SAD_CONTROL 0xf4 152 153 /* Device 14 function 0 */ 154 155 static const u32 tad_dram_rule[] = { 156 0x40, 0x44, 0x48, 0x4c, 157 0x50, 0x54, 0x58, 0x5c, 158 0x60, 0x64, 0x68, 0x6c, 159 }; 160 #define MAX_TAD ARRAY_SIZE(tad_dram_rule) 161 162 #define TAD_LIMIT(reg) ((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff) 163 #define TAD_SOCK(reg) GET_BITFIELD(reg, 10, 11) 164 #define TAD_CH(reg) GET_BITFIELD(reg, 8, 9) 165 #define TAD_TGT3(reg) GET_BITFIELD(reg, 6, 7) 166 #define TAD_TGT2(reg) GET_BITFIELD(reg, 4, 5) 167 #define TAD_TGT1(reg) GET_BITFIELD(reg, 2, 3) 168 #define TAD_TGT0(reg) GET_BITFIELD(reg, 0, 1) 169 170 /* Device 15, function 0 */ 171 172 #define MCMTR 0x7c 173 174 #define IS_ECC_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 2, 2) 175 #define IS_LOCKSTEP_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 1, 1) 176 #define IS_CLOSE_PG(mcmtr) GET_BITFIELD(mcmtr, 0, 0) 177 178 /* Device 15, function 1 */ 179 180 #define RASENABLES 0xac 181 #define IS_MIRROR_ENABLED(reg) GET_BITFIELD(reg, 0, 0) 182 183 /* Device 15, functions 2-5 */ 184 185 static const int mtr_regs[] = { 186 0x80, 0x84, 0x88, 187 }; 188 189 #define RANK_DISABLE(mtr) GET_BITFIELD(mtr, 16, 19) 190 #define IS_DIMM_PRESENT(mtr) GET_BITFIELD(mtr, 14, 14) 191 #define RANK_CNT_BITS(mtr) GET_BITFIELD(mtr, 12, 13) 192 #define RANK_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 2, 4) 193 #define COL_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 0, 1) 194 195 static const u32 tad_ch_nilv_offset[] = { 196 0x90, 0x94, 0x98, 0x9c, 197 0xa0, 0xa4, 0xa8, 0xac, 198 0xb0, 0xb4, 0xb8, 0xbc, 199 }; 200 #define CHN_IDX_OFFSET(reg) GET_BITFIELD(reg, 28, 29) 201 #define TAD_OFFSET(reg) (GET_BITFIELD(reg, 6, 25) << 26) 202 203 static const u32 rir_way_limit[] = { 204 0x108, 0x10c, 0x110, 0x114, 0x118, 205 }; 206 #define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit) 207 208 #define IS_RIR_VALID(reg) GET_BITFIELD(reg, 31, 31) 209 #define RIR_WAY(reg) GET_BITFIELD(reg, 28, 29) 210 211 #define MAX_RIR_WAY 8 212 213 static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = { 214 { 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c }, 215 { 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c }, 216 { 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c }, 217 { 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c }, 218 { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc }, 219 }; 220 221 #define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19) 222 #define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14) 223 224 /* Device 16, functions 2-7 */ 225 226 /* 227 * FIXME: Implement the error count reads directly 228 */ 229 230 static const u32 correrrcnt[] = { 231 0x104, 0x108, 0x10c, 0x110, 232 }; 233 234 #define RANK_ODD_OV(reg) GET_BITFIELD(reg, 31, 31) 235 #define RANK_ODD_ERR_CNT(reg) GET_BITFIELD(reg, 16, 30) 236 #define RANK_EVEN_OV(reg) GET_BITFIELD(reg, 15, 15) 237 #define RANK_EVEN_ERR_CNT(reg) GET_BITFIELD(reg, 0, 14) 238 239 static const u32 correrrthrsld[] = { 240 0x11c, 0x120, 0x124, 0x128, 241 }; 242 243 #define RANK_ODD_ERR_THRSLD(reg) GET_BITFIELD(reg, 16, 30) 244 #define RANK_EVEN_ERR_THRSLD(reg) GET_BITFIELD(reg, 0, 14) 245 246 247 /* Device 17, function 0 */ 248 249 #define SB_RANK_CFG_A 0x0328 250 251 #define IB_RANK_CFG_A 0x0320 252 253 /* 254 * sbridge structs 255 */ 256 257 #define NUM_CHANNELS 4 258 #define MAX_DIMMS 3 /* Max DIMMS per channel */ 259 #define CHANNEL_UNSPECIFIED 0xf /* Intel IA32 SDM 15-14 */ 260 261 enum type { 262 SANDY_BRIDGE, 263 IVY_BRIDGE, 264 HASWELL, 265 BROADWELL, 266 }; 267 268 struct sbridge_pvt; 269 struct sbridge_info { 270 enum type type; 271 u32 mcmtr; 272 u32 rankcfgr; 273 u64 (*get_tolm)(struct sbridge_pvt *pvt); 274 u64 (*get_tohm)(struct sbridge_pvt *pvt); 275 u64 (*rir_limit)(u32 reg); 276 const u32 *dram_rule; 277 const u32 *interleave_list; 278 const struct interleave_pkg *interleave_pkg; 279 u8 max_sad; 280 u8 max_interleave; 281 u8 (*get_node_id)(struct sbridge_pvt *pvt); 282 enum mem_type (*get_memory_type)(struct sbridge_pvt *pvt); 283 struct pci_dev *pci_vtd; 284 }; 285 286 struct sbridge_channel { 287 u32 ranks; 288 u32 dimms; 289 }; 290 291 struct pci_id_descr { 292 int dev_id; 293 int optional; 294 }; 295 296 struct pci_id_table { 297 const struct pci_id_descr *descr; 298 int n_devs; 299 }; 300 301 struct sbridge_dev { 302 struct list_head list; 303 u8 bus, mc; 304 u8 node_id, source_id; 305 struct pci_dev **pdev; 306 int n_devs; 307 struct mem_ctl_info *mci; 308 }; 309 310 struct sbridge_pvt { 311 struct pci_dev *pci_ta, *pci_ddrio, *pci_ras; 312 struct pci_dev *pci_sad0, *pci_sad1; 313 struct pci_dev *pci_ha0, *pci_ha1; 314 struct pci_dev *pci_br0, *pci_br1; 315 struct pci_dev *pci_ha1_ta; 316 struct pci_dev *pci_tad[NUM_CHANNELS]; 317 318 struct sbridge_dev *sbridge_dev; 319 320 struct sbridge_info info; 321 struct sbridge_channel channel[NUM_CHANNELS]; 322 323 /* Memory type detection */ 324 bool is_mirrored, is_lockstep, is_close_pg; 325 326 /* Fifo double buffers */ 327 struct mce mce_entry[MCE_LOG_LEN]; 328 struct mce mce_outentry[MCE_LOG_LEN]; 329 330 /* Fifo in/out counters */ 331 unsigned mce_in, mce_out; 332 333 /* Count indicator to show errors not got */ 334 unsigned mce_overrun; 335 336 /* Memory description */ 337 u64 tolm, tohm; 338 }; 339 340 #define PCI_DESCR(device_id, opt) \ 341 .dev_id = (device_id), \ 342 .optional = opt 343 344 static const struct pci_id_descr pci_dev_descr_sbridge[] = { 345 /* Processor Home Agent */ 346 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0) }, 347 348 /* Memory controller */ 349 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0) }, 350 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0) }, 351 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0) }, 352 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0) }, 353 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0) }, 354 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0) }, 355 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1) }, 356 357 /* System Address Decoder */ 358 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0) }, 359 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0) }, 360 361 /* Broadcast Registers */ 362 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) }, 363 }; 364 365 #define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) } 366 static const struct pci_id_table pci_dev_descr_sbridge_table[] = { 367 PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge), 368 {0,} /* 0 terminated list. */ 369 }; 370 371 /* This changes depending if 1HA or 2HA: 372 * 1HA: 373 * 0x0eb8 (17.0) is DDRIO0 374 * 2HA: 375 * 0x0ebc (17.4) is DDRIO0 376 */ 377 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0 0x0eb8 378 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0 0x0ebc 379 380 /* pci ids */ 381 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0 0x0ea0 382 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA 0x0ea8 383 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS 0x0e71 384 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0 0x0eaa 385 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1 0x0eab 386 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2 0x0eac 387 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3 0x0ead 388 #define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD 0x0ec8 389 #define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0 0x0ec9 390 #define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1 0x0eca 391 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1 0x0e60 392 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA 0x0e68 393 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS 0x0e79 394 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 0x0e6a 395 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1 0x0e6b 396 397 static const struct pci_id_descr pci_dev_descr_ibridge[] = { 398 /* Processor Home Agent */ 399 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0) }, 400 401 /* Memory controller */ 402 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0) }, 403 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0) }, 404 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0) }, 405 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0) }, 406 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0) }, 407 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0) }, 408 409 /* System Address Decoder */ 410 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0) }, 411 412 /* Broadcast Registers */ 413 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1) }, 414 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0) }, 415 416 /* Optional, mode 2HA */ 417 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1) }, 418 #if 0 419 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1) }, 420 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1) }, 421 #endif 422 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1) }, 423 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1) }, 424 425 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1) }, 426 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1) }, 427 }; 428 429 static const struct pci_id_table pci_dev_descr_ibridge_table[] = { 430 PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge), 431 {0,} /* 0 terminated list. */ 432 }; 433 434 /* Haswell support */ 435 /* EN processor: 436 * - 1 IMC 437 * - 3 DDR3 channels, 2 DPC per channel 438 * EP processor: 439 * - 1 or 2 IMC 440 * - 4 DDR4 channels, 3 DPC per channel 441 * EP 4S processor: 442 * - 2 IMC 443 * - 4 DDR4 channels, 3 DPC per channel 444 * EX processor: 445 * - 2 IMC 446 * - each IMC interfaces with a SMI 2 channel 447 * - each SMI channel interfaces with a scalable memory buffer 448 * - each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC 449 */ 450 #define HASWELL_DDRCRCLKCONTROLS 0xa10 /* Ditto on Broadwell */ 451 #define HASWELL_HASYSDEFEATURE2 0x84 452 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28 453 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0 0x2fa0 454 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1 0x2f60 455 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA 0x2fa8 456 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL 0x2f71 457 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA 0x2f68 458 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL 0x2f79 459 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc 460 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd 461 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa 462 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1 0x2fab 463 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2 0x2fac 464 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3 0x2fad 465 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 0x2f6a 466 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1 0x2f6b 467 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c 468 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d 469 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd 470 static const struct pci_id_descr pci_dev_descr_haswell[] = { 471 /* first item must be the HA */ 472 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0) }, 473 474 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0) }, 475 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0) }, 476 477 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, 1) }, 478 479 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA, 0) }, 480 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL, 0) }, 481 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0) }, 482 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0) }, 483 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1) }, 484 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1) }, 485 486 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1) }, 487 488 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1) }, 489 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL, 1) }, 490 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1) }, 491 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1) }, 492 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1) }, 493 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1) }, 494 }; 495 496 static const struct pci_id_table pci_dev_descr_haswell_table[] = { 497 PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell), 498 {0,} /* 0 terminated list. */ 499 }; 500 501 /* 502 * Broadwell support 503 * 504 * DE processor: 505 * - 1 IMC 506 * - 2 DDR3 channels, 2 DPC per channel 507 */ 508 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC 0x6f28 509 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0 0x6fa0 510 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA 0x6fa8 511 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL 0x6f71 512 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0 0x6ffc 513 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1 0x6ffd 514 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0 0x6faa 515 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1 0x6fab 516 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2 0x6fac 517 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3 0x6fad 518 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0 0x6faf 519 520 static const struct pci_id_descr pci_dev_descr_broadwell[] = { 521 /* first item must be the HA */ 522 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0, 0) }, 523 524 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0) }, 525 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0) }, 526 527 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA, 0) }, 528 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL, 0) }, 529 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0) }, 530 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0) }, 531 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 0) }, 532 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 0) }, 533 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0, 1) }, 534 }; 535 536 static const struct pci_id_table pci_dev_descr_broadwell_table[] = { 537 PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell), 538 {0,} /* 0 terminated list. */ 539 }; 540 541 /* 542 * pci_device_id table for which devices we are looking for 543 */ 544 static const struct pci_device_id sbridge_pci_tbl[] = { 545 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0)}, 546 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)}, 547 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0)}, 548 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0)}, 549 {0,} /* 0 terminated list. */ 550 }; 551 552 553 /**************************************************************************** 554 Ancillary status routines 555 ****************************************************************************/ 556 557 static inline int numrank(enum type type, u32 mtr) 558 { 559 int ranks = (1 << RANK_CNT_BITS(mtr)); 560 int max = 4; 561 562 if (type == HASWELL) 563 max = 8; 564 565 if (ranks > max) { 566 edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n", 567 ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr); 568 return -EINVAL; 569 } 570 571 return ranks; 572 } 573 574 static inline int numrow(u32 mtr) 575 { 576 int rows = (RANK_WIDTH_BITS(mtr) + 12); 577 578 if (rows < 13 || rows > 18) { 579 edac_dbg(0, "Invalid number of rows: %d (should be between 14 and 17) raw value = %x (%04x)\n", 580 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr); 581 return -EINVAL; 582 } 583 584 return 1 << rows; 585 } 586 587 static inline int numcol(u32 mtr) 588 { 589 int cols = (COL_WIDTH_BITS(mtr) + 10); 590 591 if (cols > 12) { 592 edac_dbg(0, "Invalid number of cols: %d (max = 4) raw value = %x (%04x)\n", 593 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr); 594 return -EINVAL; 595 } 596 597 return 1 << cols; 598 } 599 600 static struct sbridge_dev *get_sbridge_dev(u8 bus) 601 { 602 struct sbridge_dev *sbridge_dev; 603 604 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { 605 if (sbridge_dev->bus == bus) 606 return sbridge_dev; 607 } 608 609 return NULL; 610 } 611 612 static struct sbridge_dev *alloc_sbridge_dev(u8 bus, 613 const struct pci_id_table *table) 614 { 615 struct sbridge_dev *sbridge_dev; 616 617 sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL); 618 if (!sbridge_dev) 619 return NULL; 620 621 sbridge_dev->pdev = kzalloc(sizeof(*sbridge_dev->pdev) * table->n_devs, 622 GFP_KERNEL); 623 if (!sbridge_dev->pdev) { 624 kfree(sbridge_dev); 625 return NULL; 626 } 627 628 sbridge_dev->bus = bus; 629 sbridge_dev->n_devs = table->n_devs; 630 list_add_tail(&sbridge_dev->list, &sbridge_edac_list); 631 632 return sbridge_dev; 633 } 634 635 static void free_sbridge_dev(struct sbridge_dev *sbridge_dev) 636 { 637 list_del(&sbridge_dev->list); 638 kfree(sbridge_dev->pdev); 639 kfree(sbridge_dev); 640 } 641 642 static u64 sbridge_get_tolm(struct sbridge_pvt *pvt) 643 { 644 u32 reg; 645 646 /* Address range is 32:28 */ 647 pci_read_config_dword(pvt->pci_sad1, TOLM, ®); 648 return GET_TOLM(reg); 649 } 650 651 static u64 sbridge_get_tohm(struct sbridge_pvt *pvt) 652 { 653 u32 reg; 654 655 pci_read_config_dword(pvt->pci_sad1, TOHM, ®); 656 return GET_TOHM(reg); 657 } 658 659 static u64 ibridge_get_tolm(struct sbridge_pvt *pvt) 660 { 661 u32 reg; 662 663 pci_read_config_dword(pvt->pci_br1, TOLM, ®); 664 665 return GET_TOLM(reg); 666 } 667 668 static u64 ibridge_get_tohm(struct sbridge_pvt *pvt) 669 { 670 u32 reg; 671 672 pci_read_config_dword(pvt->pci_br1, TOHM, ®); 673 674 return GET_TOHM(reg); 675 } 676 677 static u64 rir_limit(u32 reg) 678 { 679 return ((u64)GET_BITFIELD(reg, 1, 10) << 29) | 0x1fffffff; 680 } 681 682 static enum mem_type get_memory_type(struct sbridge_pvt *pvt) 683 { 684 u32 reg; 685 enum mem_type mtype; 686 687 if (pvt->pci_ddrio) { 688 pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr, 689 ®); 690 if (GET_BITFIELD(reg, 11, 11)) 691 /* FIXME: Can also be LRDIMM */ 692 mtype = MEM_RDDR3; 693 else 694 mtype = MEM_DDR3; 695 } else 696 mtype = MEM_UNKNOWN; 697 698 return mtype; 699 } 700 701 static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt) 702 { 703 u32 reg; 704 bool registered = false; 705 enum mem_type mtype = MEM_UNKNOWN; 706 707 if (!pvt->pci_ddrio) 708 goto out; 709 710 pci_read_config_dword(pvt->pci_ddrio, 711 HASWELL_DDRCRCLKCONTROLS, ®); 712 /* Is_Rdimm */ 713 if (GET_BITFIELD(reg, 16, 16)) 714 registered = true; 715 716 pci_read_config_dword(pvt->pci_ta, MCMTR, ®); 717 if (GET_BITFIELD(reg, 14, 14)) { 718 if (registered) 719 mtype = MEM_RDDR4; 720 else 721 mtype = MEM_DDR4; 722 } else { 723 if (registered) 724 mtype = MEM_RDDR3; 725 else 726 mtype = MEM_DDR3; 727 } 728 729 out: 730 return mtype; 731 } 732 733 static u8 get_node_id(struct sbridge_pvt *pvt) 734 { 735 u32 reg; 736 pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, ®); 737 return GET_BITFIELD(reg, 0, 2); 738 } 739 740 static u8 haswell_get_node_id(struct sbridge_pvt *pvt) 741 { 742 u32 reg; 743 744 pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, ®); 745 return GET_BITFIELD(reg, 0, 3); 746 } 747 748 static u64 haswell_get_tolm(struct sbridge_pvt *pvt) 749 { 750 u32 reg; 751 752 pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOLM, ®); 753 return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff; 754 } 755 756 static u64 haswell_get_tohm(struct sbridge_pvt *pvt) 757 { 758 u64 rc; 759 u32 reg; 760 761 pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_0, ®); 762 rc = GET_BITFIELD(reg, 26, 31); 763 pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, ®); 764 rc = ((reg << 6) | rc) << 26; 765 766 return rc | 0x1ffffff; 767 } 768 769 static u64 haswell_rir_limit(u32 reg) 770 { 771 return (((u64)GET_BITFIELD(reg, 1, 11) + 1) << 29) - 1; 772 } 773 774 static inline u8 sad_pkg_socket(u8 pkg) 775 { 776 /* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */ 777 return ((pkg >> 3) << 2) | (pkg & 0x3); 778 } 779 780 static inline u8 sad_pkg_ha(u8 pkg) 781 { 782 return (pkg >> 2) & 0x1; 783 } 784 785 /**************************************************************************** 786 Memory check routines 787 ****************************************************************************/ 788 static struct pci_dev *get_pdev_same_bus(u8 bus, u32 id) 789 { 790 struct pci_dev *pdev = NULL; 791 792 do { 793 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, pdev); 794 if (pdev && pdev->bus->number == bus) 795 break; 796 } while (pdev); 797 798 return pdev; 799 } 800 801 /** 802 * check_if_ecc_is_active() - Checks if ECC is active 803 * @bus: Device bus 804 * @type: Memory controller type 805 * returns: 0 in case ECC is active, -ENODEV if it can't be determined or 806 * disabled 807 */ 808 static int check_if_ecc_is_active(const u8 bus, enum type type) 809 { 810 struct pci_dev *pdev = NULL; 811 u32 mcmtr, id; 812 813 switch (type) { 814 case IVY_BRIDGE: 815 id = PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA; 816 break; 817 case HASWELL: 818 id = PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA; 819 break; 820 case SANDY_BRIDGE: 821 id = PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA; 822 break; 823 case BROADWELL: 824 id = PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA; 825 break; 826 default: 827 return -ENODEV; 828 } 829 830 pdev = get_pdev_same_bus(bus, id); 831 if (!pdev) { 832 sbridge_printk(KERN_ERR, "Couldn't find PCI device " 833 "%04x:%04x! on bus %02d\n", 834 PCI_VENDOR_ID_INTEL, id, bus); 835 return -ENODEV; 836 } 837 838 pci_read_config_dword(pdev, MCMTR, &mcmtr); 839 if (!IS_ECC_ENABLED(mcmtr)) { 840 sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n"); 841 return -ENODEV; 842 } 843 return 0; 844 } 845 846 static int get_dimm_config(struct mem_ctl_info *mci) 847 { 848 struct sbridge_pvt *pvt = mci->pvt_info; 849 struct dimm_info *dimm; 850 unsigned i, j, banks, ranks, rows, cols, npages; 851 u64 size; 852 u32 reg; 853 enum edac_type mode; 854 enum mem_type mtype; 855 856 if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) 857 pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, ®); 858 else 859 pci_read_config_dword(pvt->pci_br0, SAD_TARGET, ®); 860 861 pvt->sbridge_dev->source_id = SOURCE_ID(reg); 862 863 pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt); 864 edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n", 865 pvt->sbridge_dev->mc, 866 pvt->sbridge_dev->node_id, 867 pvt->sbridge_dev->source_id); 868 869 pci_read_config_dword(pvt->pci_ras, RASENABLES, ®); 870 if (IS_MIRROR_ENABLED(reg)) { 871 edac_dbg(0, "Memory mirror is enabled\n"); 872 pvt->is_mirrored = true; 873 } else { 874 edac_dbg(0, "Memory mirror is disabled\n"); 875 pvt->is_mirrored = false; 876 } 877 878 pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr); 879 if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) { 880 edac_dbg(0, "Lockstep is enabled\n"); 881 mode = EDAC_S8ECD8ED; 882 pvt->is_lockstep = true; 883 } else { 884 edac_dbg(0, "Lockstep is disabled\n"); 885 mode = EDAC_S4ECD4ED; 886 pvt->is_lockstep = false; 887 } 888 if (IS_CLOSE_PG(pvt->info.mcmtr)) { 889 edac_dbg(0, "address map is on closed page mode\n"); 890 pvt->is_close_pg = true; 891 } else { 892 edac_dbg(0, "address map is on open page mode\n"); 893 pvt->is_close_pg = false; 894 } 895 896 mtype = pvt->info.get_memory_type(pvt); 897 if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4) 898 edac_dbg(0, "Memory is registered\n"); 899 else if (mtype == MEM_UNKNOWN) 900 edac_dbg(0, "Cannot determine memory type\n"); 901 else 902 edac_dbg(0, "Memory is unregistered\n"); 903 904 if (mtype == MEM_DDR4 || mtype == MEM_RDDR4) 905 banks = 16; 906 else 907 banks = 8; 908 909 for (i = 0; i < NUM_CHANNELS; i++) { 910 u32 mtr; 911 912 for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) { 913 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, 914 i, j, 0); 915 pci_read_config_dword(pvt->pci_tad[i], 916 mtr_regs[j], &mtr); 917 edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr); 918 if (IS_DIMM_PRESENT(mtr)) { 919 pvt->channel[i].dimms++; 920 921 ranks = numrank(pvt->info.type, mtr); 922 rows = numrow(mtr); 923 cols = numcol(mtr); 924 925 size = ((u64)rows * cols * banks * ranks) >> (20 - 3); 926 npages = MiB_TO_PAGES(size); 927 928 edac_dbg(0, "mc#%d: channel %d, dimm %d, %Ld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n", 929 pvt->sbridge_dev->mc, i, j, 930 size, npages, 931 banks, ranks, rows, cols); 932 933 dimm->nr_pages = npages; 934 dimm->grain = 32; 935 switch (banks) { 936 case 16: 937 dimm->dtype = DEV_X16; 938 break; 939 case 8: 940 dimm->dtype = DEV_X8; 941 break; 942 case 4: 943 dimm->dtype = DEV_X4; 944 break; 945 } 946 dimm->mtype = mtype; 947 dimm->edac_mode = mode; 948 snprintf(dimm->label, sizeof(dimm->label), 949 "CPU_SrcID#%u_Channel#%u_DIMM#%u", 950 pvt->sbridge_dev->source_id, i, j); 951 } 952 } 953 } 954 955 return 0; 956 } 957 958 static void get_memory_layout(const struct mem_ctl_info *mci) 959 { 960 struct sbridge_pvt *pvt = mci->pvt_info; 961 int i, j, k, n_sads, n_tads, sad_interl; 962 u32 reg; 963 u64 limit, prv = 0; 964 u64 tmp_mb; 965 u32 gb, mb; 966 u32 rir_way; 967 968 /* 969 * Step 1) Get TOLM/TOHM ranges 970 */ 971 972 pvt->tolm = pvt->info.get_tolm(pvt); 973 tmp_mb = (1 + pvt->tolm) >> 20; 974 975 gb = div_u64_rem(tmp_mb, 1024, &mb); 976 edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", 977 gb, (mb*1000)/1024, (u64)pvt->tolm); 978 979 /* Address range is already 45:25 */ 980 pvt->tohm = pvt->info.get_tohm(pvt); 981 tmp_mb = (1 + pvt->tohm) >> 20; 982 983 gb = div_u64_rem(tmp_mb, 1024, &mb); 984 edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", 985 gb, (mb*1000)/1024, (u64)pvt->tohm); 986 987 /* 988 * Step 2) Get SAD range and SAD Interleave list 989 * TAD registers contain the interleave wayness. However, it 990 * seems simpler to just discover it indirectly, with the 991 * algorithm bellow. 992 */ 993 prv = 0; 994 for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) { 995 /* SAD_LIMIT Address range is 45:26 */ 996 pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads], 997 ®); 998 limit = SAD_LIMIT(reg); 999 1000 if (!DRAM_RULE_ENABLE(reg)) 1001 continue; 1002 1003 if (limit <= prv) 1004 break; 1005 1006 tmp_mb = (limit + 1) >> 20; 1007 gb = div_u64_rem(tmp_mb, 1024, &mb); 1008 edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n", 1009 n_sads, 1010 get_dram_attr(reg), 1011 gb, (mb*1000)/1024, 1012 ((u64)tmp_mb) << 20L, 1013 INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]", 1014 reg); 1015 prv = limit; 1016 1017 pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads], 1018 ®); 1019 sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0); 1020 for (j = 0; j < 8; j++) { 1021 u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j); 1022 if (j > 0 && sad_interl == pkg) 1023 break; 1024 1025 edac_dbg(0, "SAD#%d, interleave #%d: %d\n", 1026 n_sads, j, pkg); 1027 } 1028 } 1029 1030 /* 1031 * Step 3) Get TAD range 1032 */ 1033 prv = 0; 1034 for (n_tads = 0; n_tads < MAX_TAD; n_tads++) { 1035 pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads], 1036 ®); 1037 limit = TAD_LIMIT(reg); 1038 if (limit <= prv) 1039 break; 1040 tmp_mb = (limit + 1) >> 20; 1041 1042 gb = div_u64_rem(tmp_mb, 1024, &mb); 1043 edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n", 1044 n_tads, gb, (mb*1000)/1024, 1045 ((u64)tmp_mb) << 20L, 1046 (u32)TAD_SOCK(reg), 1047 (u32)TAD_CH(reg), 1048 (u32)TAD_TGT0(reg), 1049 (u32)TAD_TGT1(reg), 1050 (u32)TAD_TGT2(reg), 1051 (u32)TAD_TGT3(reg), 1052 reg); 1053 prv = limit; 1054 } 1055 1056 /* 1057 * Step 4) Get TAD offsets, per each channel 1058 */ 1059 for (i = 0; i < NUM_CHANNELS; i++) { 1060 if (!pvt->channel[i].dimms) 1061 continue; 1062 for (j = 0; j < n_tads; j++) { 1063 pci_read_config_dword(pvt->pci_tad[i], 1064 tad_ch_nilv_offset[j], 1065 ®); 1066 tmp_mb = TAD_OFFSET(reg) >> 20; 1067 gb = div_u64_rem(tmp_mb, 1024, &mb); 1068 edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n", 1069 i, j, 1070 gb, (mb*1000)/1024, 1071 ((u64)tmp_mb) << 20L, 1072 reg); 1073 } 1074 } 1075 1076 /* 1077 * Step 6) Get RIR Wayness/Limit, per each channel 1078 */ 1079 for (i = 0; i < NUM_CHANNELS; i++) { 1080 if (!pvt->channel[i].dimms) 1081 continue; 1082 for (j = 0; j < MAX_RIR_RANGES; j++) { 1083 pci_read_config_dword(pvt->pci_tad[i], 1084 rir_way_limit[j], 1085 ®); 1086 1087 if (!IS_RIR_VALID(reg)) 1088 continue; 1089 1090 tmp_mb = pvt->info.rir_limit(reg) >> 20; 1091 rir_way = 1 << RIR_WAY(reg); 1092 gb = div_u64_rem(tmp_mb, 1024, &mb); 1093 edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n", 1094 i, j, 1095 gb, (mb*1000)/1024, 1096 ((u64)tmp_mb) << 20L, 1097 rir_way, 1098 reg); 1099 1100 for (k = 0; k < rir_way; k++) { 1101 pci_read_config_dword(pvt->pci_tad[i], 1102 rir_offset[j][k], 1103 ®); 1104 tmp_mb = RIR_OFFSET(reg) << 6; 1105 1106 gb = div_u64_rem(tmp_mb, 1024, &mb); 1107 edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n", 1108 i, j, k, 1109 gb, (mb*1000)/1024, 1110 ((u64)tmp_mb) << 20L, 1111 (u32)RIR_RNK_TGT(reg), 1112 reg); 1113 } 1114 } 1115 } 1116 } 1117 1118 static struct mem_ctl_info *get_mci_for_node_id(u8 node_id) 1119 { 1120 struct sbridge_dev *sbridge_dev; 1121 1122 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { 1123 if (sbridge_dev->node_id == node_id) 1124 return sbridge_dev->mci; 1125 } 1126 return NULL; 1127 } 1128 1129 static int get_memory_error_data(struct mem_ctl_info *mci, 1130 u64 addr, 1131 u8 *socket, 1132 long *channel_mask, 1133 u8 *rank, 1134 char **area_type, char *msg) 1135 { 1136 struct mem_ctl_info *new_mci; 1137 struct sbridge_pvt *pvt = mci->pvt_info; 1138 struct pci_dev *pci_ha; 1139 int n_rir, n_sads, n_tads, sad_way, sck_xch; 1140 int sad_interl, idx, base_ch; 1141 int interleave_mode, shiftup = 0; 1142 unsigned sad_interleave[pvt->info.max_interleave]; 1143 u32 reg, dram_rule; 1144 u8 ch_way, sck_way, pkg, sad_ha = 0; 1145 u32 tad_offset; 1146 u32 rir_way; 1147 u32 mb, gb; 1148 u64 ch_addr, offset, limit = 0, prv = 0; 1149 1150 1151 /* 1152 * Step 0) Check if the address is at special memory ranges 1153 * The check bellow is probably enough to fill all cases where 1154 * the error is not inside a memory, except for the legacy 1155 * range (e. g. VGA addresses). It is unlikely, however, that the 1156 * memory controller would generate an error on that range. 1157 */ 1158 if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) { 1159 sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr); 1160 return -EINVAL; 1161 } 1162 if (addr >= (u64)pvt->tohm) { 1163 sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr); 1164 return -EINVAL; 1165 } 1166 1167 /* 1168 * Step 1) Get socket 1169 */ 1170 for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) { 1171 pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads], 1172 ®); 1173 1174 if (!DRAM_RULE_ENABLE(reg)) 1175 continue; 1176 1177 limit = SAD_LIMIT(reg); 1178 if (limit <= prv) { 1179 sprintf(msg, "Can't discover the memory socket"); 1180 return -EINVAL; 1181 } 1182 if (addr <= limit) 1183 break; 1184 prv = limit; 1185 } 1186 if (n_sads == pvt->info.max_sad) { 1187 sprintf(msg, "Can't discover the memory socket"); 1188 return -EINVAL; 1189 } 1190 dram_rule = reg; 1191 *area_type = get_dram_attr(dram_rule); 1192 interleave_mode = INTERLEAVE_MODE(dram_rule); 1193 1194 pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads], 1195 ®); 1196 1197 if (pvt->info.type == SANDY_BRIDGE) { 1198 sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0); 1199 for (sad_way = 0; sad_way < 8; sad_way++) { 1200 u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way); 1201 if (sad_way > 0 && sad_interl == pkg) 1202 break; 1203 sad_interleave[sad_way] = pkg; 1204 edac_dbg(0, "SAD interleave #%d: %d\n", 1205 sad_way, sad_interleave[sad_way]); 1206 } 1207 edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n", 1208 pvt->sbridge_dev->mc, 1209 n_sads, 1210 addr, 1211 limit, 1212 sad_way + 7, 1213 !interleave_mode ? "" : "XOR[18:16]"); 1214 if (interleave_mode) 1215 idx = ((addr >> 6) ^ (addr >> 16)) & 7; 1216 else 1217 idx = (addr >> 6) & 7; 1218 switch (sad_way) { 1219 case 1: 1220 idx = 0; 1221 break; 1222 case 2: 1223 idx = idx & 1; 1224 break; 1225 case 4: 1226 idx = idx & 3; 1227 break; 1228 case 8: 1229 break; 1230 default: 1231 sprintf(msg, "Can't discover socket interleave"); 1232 return -EINVAL; 1233 } 1234 *socket = sad_interleave[idx]; 1235 edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n", 1236 idx, sad_way, *socket); 1237 } else if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) { 1238 int bits, a7mode = A7MODE(dram_rule); 1239 1240 if (a7mode) { 1241 /* A7 mode swaps P9 with P6 */ 1242 bits = GET_BITFIELD(addr, 7, 8) << 1; 1243 bits |= GET_BITFIELD(addr, 9, 9); 1244 } else 1245 bits = GET_BITFIELD(addr, 7, 9); 1246 1247 if (interleave_mode) { 1248 /* interleave mode will XOR {8,7,6} with {18,17,16} */ 1249 idx = GET_BITFIELD(addr, 16, 18); 1250 idx ^= bits; 1251 } else 1252 idx = bits; 1253 1254 pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx); 1255 *socket = sad_pkg_socket(pkg); 1256 sad_ha = sad_pkg_ha(pkg); 1257 1258 if (a7mode) { 1259 /* MCChanShiftUpEnable */ 1260 pci_read_config_dword(pvt->pci_ha0, 1261 HASWELL_HASYSDEFEATURE2, ®); 1262 shiftup = GET_BITFIELD(reg, 22, 22); 1263 } 1264 1265 edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %i, shiftup: %i\n", 1266 idx, *socket, sad_ha, shiftup); 1267 } else { 1268 /* Ivy Bridge's SAD mode doesn't support XOR interleave mode */ 1269 idx = (addr >> 6) & 7; 1270 pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx); 1271 *socket = sad_pkg_socket(pkg); 1272 sad_ha = sad_pkg_ha(pkg); 1273 edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n", 1274 idx, *socket, sad_ha); 1275 } 1276 1277 /* 1278 * Move to the proper node structure, in order to access the 1279 * right PCI registers 1280 */ 1281 new_mci = get_mci_for_node_id(*socket); 1282 if (!new_mci) { 1283 sprintf(msg, "Struct for socket #%u wasn't initialized", 1284 *socket); 1285 return -EINVAL; 1286 } 1287 mci = new_mci; 1288 pvt = mci->pvt_info; 1289 1290 /* 1291 * Step 2) Get memory channel 1292 */ 1293 prv = 0; 1294 if (pvt->info.type == SANDY_BRIDGE) 1295 pci_ha = pvt->pci_ha0; 1296 else { 1297 if (sad_ha) 1298 pci_ha = pvt->pci_ha1; 1299 else 1300 pci_ha = pvt->pci_ha0; 1301 } 1302 for (n_tads = 0; n_tads < MAX_TAD; n_tads++) { 1303 pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], ®); 1304 limit = TAD_LIMIT(reg); 1305 if (limit <= prv) { 1306 sprintf(msg, "Can't discover the memory channel"); 1307 return -EINVAL; 1308 } 1309 if (addr <= limit) 1310 break; 1311 prv = limit; 1312 } 1313 if (n_tads == MAX_TAD) { 1314 sprintf(msg, "Can't discover the memory channel"); 1315 return -EINVAL; 1316 } 1317 1318 ch_way = TAD_CH(reg) + 1; 1319 sck_way = TAD_SOCK(reg) + 1; 1320 1321 if (ch_way == 3) 1322 idx = addr >> 6; 1323 else 1324 idx = (addr >> (6 + sck_way + shiftup)) & 0x3; 1325 idx = idx % ch_way; 1326 1327 /* 1328 * FIXME: Shouldn't we use CHN_IDX_OFFSET() here, when ch_way == 3 ??? 1329 */ 1330 switch (idx) { 1331 case 0: 1332 base_ch = TAD_TGT0(reg); 1333 break; 1334 case 1: 1335 base_ch = TAD_TGT1(reg); 1336 break; 1337 case 2: 1338 base_ch = TAD_TGT2(reg); 1339 break; 1340 case 3: 1341 base_ch = TAD_TGT3(reg); 1342 break; 1343 default: 1344 sprintf(msg, "Can't discover the TAD target"); 1345 return -EINVAL; 1346 } 1347 *channel_mask = 1 << base_ch; 1348 1349 pci_read_config_dword(pvt->pci_tad[base_ch], 1350 tad_ch_nilv_offset[n_tads], 1351 &tad_offset); 1352 1353 if (pvt->is_mirrored) { 1354 *channel_mask |= 1 << ((base_ch + 2) % 4); 1355 switch(ch_way) { 1356 case 2: 1357 case 4: 1358 sck_xch = 1 << sck_way * (ch_way >> 1); 1359 break; 1360 default: 1361 sprintf(msg, "Invalid mirror set. Can't decode addr"); 1362 return -EINVAL; 1363 } 1364 } else 1365 sck_xch = (1 << sck_way) * ch_way; 1366 1367 if (pvt->is_lockstep) 1368 *channel_mask |= 1 << ((base_ch + 1) % 4); 1369 1370 offset = TAD_OFFSET(tad_offset); 1371 1372 edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n", 1373 n_tads, 1374 addr, 1375 limit, 1376 (u32)TAD_SOCK(reg), 1377 ch_way, 1378 offset, 1379 idx, 1380 base_ch, 1381 *channel_mask); 1382 1383 /* Calculate channel address */ 1384 /* Remove the TAD offset */ 1385 1386 if (offset > addr) { 1387 sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!", 1388 offset, addr); 1389 return -EINVAL; 1390 } 1391 addr -= offset; 1392 /* Store the low bits [0:6] of the addr */ 1393 ch_addr = addr & 0x7f; 1394 /* Remove socket wayness and remove 6 bits */ 1395 addr >>= 6; 1396 addr = div_u64(addr, sck_xch); 1397 #if 0 1398 /* Divide by channel way */ 1399 addr = addr / ch_way; 1400 #endif 1401 /* Recover the last 6 bits */ 1402 ch_addr |= addr << 6; 1403 1404 /* 1405 * Step 3) Decode rank 1406 */ 1407 for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) { 1408 pci_read_config_dword(pvt->pci_tad[base_ch], 1409 rir_way_limit[n_rir], 1410 ®); 1411 1412 if (!IS_RIR_VALID(reg)) 1413 continue; 1414 1415 limit = pvt->info.rir_limit(reg); 1416 gb = div_u64_rem(limit >> 20, 1024, &mb); 1417 edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n", 1418 n_rir, 1419 gb, (mb*1000)/1024, 1420 limit, 1421 1 << RIR_WAY(reg)); 1422 if (ch_addr <= limit) 1423 break; 1424 } 1425 if (n_rir == MAX_RIR_RANGES) { 1426 sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx", 1427 ch_addr); 1428 return -EINVAL; 1429 } 1430 rir_way = RIR_WAY(reg); 1431 1432 if (pvt->is_close_pg) 1433 idx = (ch_addr >> 6); 1434 else 1435 idx = (ch_addr >> 13); /* FIXME: Datasheet says to shift by 15 */ 1436 idx %= 1 << rir_way; 1437 1438 pci_read_config_dword(pvt->pci_tad[base_ch], 1439 rir_offset[n_rir][idx], 1440 ®); 1441 *rank = RIR_RNK_TGT(reg); 1442 1443 edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n", 1444 n_rir, 1445 ch_addr, 1446 limit, 1447 rir_way, 1448 idx); 1449 1450 return 0; 1451 } 1452 1453 /**************************************************************************** 1454 Device initialization routines: put/get, init/exit 1455 ****************************************************************************/ 1456 1457 /* 1458 * sbridge_put_all_devices 'put' all the devices that we have 1459 * reserved via 'get' 1460 */ 1461 static void sbridge_put_devices(struct sbridge_dev *sbridge_dev) 1462 { 1463 int i; 1464 1465 edac_dbg(0, "\n"); 1466 for (i = 0; i < sbridge_dev->n_devs; i++) { 1467 struct pci_dev *pdev = sbridge_dev->pdev[i]; 1468 if (!pdev) 1469 continue; 1470 edac_dbg(0, "Removing dev %02x:%02x.%d\n", 1471 pdev->bus->number, 1472 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 1473 pci_dev_put(pdev); 1474 } 1475 } 1476 1477 static void sbridge_put_all_devices(void) 1478 { 1479 struct sbridge_dev *sbridge_dev, *tmp; 1480 1481 list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) { 1482 sbridge_put_devices(sbridge_dev); 1483 free_sbridge_dev(sbridge_dev); 1484 } 1485 } 1486 1487 static int sbridge_get_onedevice(struct pci_dev **prev, 1488 u8 *num_mc, 1489 const struct pci_id_table *table, 1490 const unsigned devno) 1491 { 1492 struct sbridge_dev *sbridge_dev; 1493 const struct pci_id_descr *dev_descr = &table->descr[devno]; 1494 struct pci_dev *pdev = NULL; 1495 u8 bus = 0; 1496 1497 sbridge_printk(KERN_DEBUG, 1498 "Seeking for: PCI ID %04x:%04x\n", 1499 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 1500 1501 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 1502 dev_descr->dev_id, *prev); 1503 1504 if (!pdev) { 1505 if (*prev) { 1506 *prev = pdev; 1507 return 0; 1508 } 1509 1510 if (dev_descr->optional) 1511 return 0; 1512 1513 /* if the HA wasn't found */ 1514 if (devno == 0) 1515 return -ENODEV; 1516 1517 sbridge_printk(KERN_INFO, 1518 "Device not found: %04x:%04x\n", 1519 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 1520 1521 /* End of list, leave */ 1522 return -ENODEV; 1523 } 1524 bus = pdev->bus->number; 1525 1526 sbridge_dev = get_sbridge_dev(bus); 1527 if (!sbridge_dev) { 1528 sbridge_dev = alloc_sbridge_dev(bus, table); 1529 if (!sbridge_dev) { 1530 pci_dev_put(pdev); 1531 return -ENOMEM; 1532 } 1533 (*num_mc)++; 1534 } 1535 1536 if (sbridge_dev->pdev[devno]) { 1537 sbridge_printk(KERN_ERR, 1538 "Duplicated device for %04x:%04x\n", 1539 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 1540 pci_dev_put(pdev); 1541 return -ENODEV; 1542 } 1543 1544 sbridge_dev->pdev[devno] = pdev; 1545 1546 /* Be sure that the device is enabled */ 1547 if (unlikely(pci_enable_device(pdev) < 0)) { 1548 sbridge_printk(KERN_ERR, 1549 "Couldn't enable %04x:%04x\n", 1550 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 1551 return -ENODEV; 1552 } 1553 1554 edac_dbg(0, "Detected %04x:%04x\n", 1555 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 1556 1557 /* 1558 * As stated on drivers/pci/search.c, the reference count for 1559 * @from is always decremented if it is not %NULL. So, as we need 1560 * to get all devices up to null, we need to do a get for the device 1561 */ 1562 pci_dev_get(pdev); 1563 1564 *prev = pdev; 1565 1566 return 0; 1567 } 1568 1569 /* 1570 * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's 1571 * devices we want to reference for this driver. 1572 * @num_mc: pointer to the memory controllers count, to be incremented in case 1573 * of success. 1574 * @table: model specific table 1575 * 1576 * returns 0 in case of success or error code 1577 */ 1578 static int sbridge_get_all_devices(u8 *num_mc, 1579 const struct pci_id_table *table) 1580 { 1581 int i, rc; 1582 struct pci_dev *pdev = NULL; 1583 1584 while (table && table->descr) { 1585 for (i = 0; i < table->n_devs; i++) { 1586 pdev = NULL; 1587 do { 1588 rc = sbridge_get_onedevice(&pdev, num_mc, 1589 table, i); 1590 if (rc < 0) { 1591 if (i == 0) { 1592 i = table->n_devs; 1593 break; 1594 } 1595 sbridge_put_all_devices(); 1596 return -ENODEV; 1597 } 1598 } while (pdev); 1599 } 1600 table++; 1601 } 1602 1603 return 0; 1604 } 1605 1606 static int sbridge_mci_bind_devs(struct mem_ctl_info *mci, 1607 struct sbridge_dev *sbridge_dev) 1608 { 1609 struct sbridge_pvt *pvt = mci->pvt_info; 1610 struct pci_dev *pdev; 1611 int i; 1612 1613 for (i = 0; i < sbridge_dev->n_devs; i++) { 1614 pdev = sbridge_dev->pdev[i]; 1615 if (!pdev) 1616 continue; 1617 1618 switch (pdev->device) { 1619 case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0: 1620 pvt->pci_sad0 = pdev; 1621 break; 1622 case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1: 1623 pvt->pci_sad1 = pdev; 1624 break; 1625 case PCI_DEVICE_ID_INTEL_SBRIDGE_BR: 1626 pvt->pci_br0 = pdev; 1627 break; 1628 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0: 1629 pvt->pci_ha0 = pdev; 1630 break; 1631 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA: 1632 pvt->pci_ta = pdev; 1633 break; 1634 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS: 1635 pvt->pci_ras = pdev; 1636 break; 1637 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0: 1638 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1: 1639 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2: 1640 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3: 1641 { 1642 int id = pdev->device - PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0; 1643 pvt->pci_tad[id] = pdev; 1644 } 1645 break; 1646 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO: 1647 pvt->pci_ddrio = pdev; 1648 break; 1649 default: 1650 goto error; 1651 } 1652 1653 edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n", 1654 pdev->vendor, pdev->device, 1655 sbridge_dev->bus, 1656 pdev); 1657 } 1658 1659 /* Check if everything were registered */ 1660 if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha0 || 1661 !pvt-> pci_tad || !pvt->pci_ras || !pvt->pci_ta) 1662 goto enodev; 1663 1664 for (i = 0; i < NUM_CHANNELS; i++) { 1665 if (!pvt->pci_tad[i]) 1666 goto enodev; 1667 } 1668 return 0; 1669 1670 enodev: 1671 sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); 1672 return -ENODEV; 1673 1674 error: 1675 sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n", 1676 PCI_VENDOR_ID_INTEL, pdev->device); 1677 return -EINVAL; 1678 } 1679 1680 static int ibridge_mci_bind_devs(struct mem_ctl_info *mci, 1681 struct sbridge_dev *sbridge_dev) 1682 { 1683 struct sbridge_pvt *pvt = mci->pvt_info; 1684 struct pci_dev *pdev, *tmp; 1685 int i; 1686 bool mode_2ha = false; 1687 1688 tmp = pci_get_device(PCI_VENDOR_ID_INTEL, 1689 PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, NULL); 1690 if (tmp) { 1691 mode_2ha = true; 1692 pci_dev_put(tmp); 1693 } 1694 1695 for (i = 0; i < sbridge_dev->n_devs; i++) { 1696 pdev = sbridge_dev->pdev[i]; 1697 if (!pdev) 1698 continue; 1699 1700 switch (pdev->device) { 1701 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0: 1702 pvt->pci_ha0 = pdev; 1703 break; 1704 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA: 1705 pvt->pci_ta = pdev; 1706 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS: 1707 pvt->pci_ras = pdev; 1708 break; 1709 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2: 1710 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3: 1711 /* if we have 2 HAs active, channels 2 and 3 1712 * are in other device */ 1713 if (mode_2ha) 1714 break; 1715 /* fall through */ 1716 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0: 1717 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1: 1718 { 1719 int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0; 1720 pvt->pci_tad[id] = pdev; 1721 } 1722 break; 1723 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0: 1724 pvt->pci_ddrio = pdev; 1725 break; 1726 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0: 1727 if (!mode_2ha) 1728 pvt->pci_ddrio = pdev; 1729 break; 1730 case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD: 1731 pvt->pci_sad0 = pdev; 1732 break; 1733 case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0: 1734 pvt->pci_br0 = pdev; 1735 break; 1736 case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1: 1737 pvt->pci_br1 = pdev; 1738 break; 1739 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1: 1740 pvt->pci_ha1 = pdev; 1741 break; 1742 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0: 1743 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1: 1744 { 1745 int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 + 2; 1746 1747 /* we shouldn't have this device if we have just one 1748 * HA present */ 1749 WARN_ON(!mode_2ha); 1750 pvt->pci_tad[id] = pdev; 1751 } 1752 break; 1753 default: 1754 goto error; 1755 } 1756 1757 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n", 1758 sbridge_dev->bus, 1759 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), 1760 pdev); 1761 } 1762 1763 /* Check if everything were registered */ 1764 if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_br0 || 1765 !pvt->pci_br1 || !pvt->pci_tad || !pvt->pci_ras || 1766 !pvt->pci_ta) 1767 goto enodev; 1768 1769 for (i = 0; i < NUM_CHANNELS; i++) { 1770 if (!pvt->pci_tad[i]) 1771 goto enodev; 1772 } 1773 return 0; 1774 1775 enodev: 1776 sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); 1777 return -ENODEV; 1778 1779 error: 1780 sbridge_printk(KERN_ERR, 1781 "Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL, 1782 pdev->device); 1783 return -EINVAL; 1784 } 1785 1786 static int haswell_mci_bind_devs(struct mem_ctl_info *mci, 1787 struct sbridge_dev *sbridge_dev) 1788 { 1789 struct sbridge_pvt *pvt = mci->pvt_info; 1790 struct pci_dev *pdev, *tmp; 1791 int i; 1792 bool mode_2ha = false; 1793 1794 tmp = pci_get_device(PCI_VENDOR_ID_INTEL, 1795 PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, NULL); 1796 if (tmp) { 1797 mode_2ha = true; 1798 pci_dev_put(tmp); 1799 } 1800 1801 /* there's only one device per system; not tied to any bus */ 1802 if (pvt->info.pci_vtd == NULL) 1803 /* result will be checked later */ 1804 pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL, 1805 PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC, 1806 NULL); 1807 1808 for (i = 0; i < sbridge_dev->n_devs; i++) { 1809 pdev = sbridge_dev->pdev[i]; 1810 if (!pdev) 1811 continue; 1812 1813 switch (pdev->device) { 1814 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0: 1815 pvt->pci_sad0 = pdev; 1816 break; 1817 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1: 1818 pvt->pci_sad1 = pdev; 1819 break; 1820 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0: 1821 pvt->pci_ha0 = pdev; 1822 break; 1823 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA: 1824 pvt->pci_ta = pdev; 1825 break; 1826 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL: 1827 pvt->pci_ras = pdev; 1828 break; 1829 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0: 1830 pvt->pci_tad[0] = pdev; 1831 break; 1832 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1: 1833 pvt->pci_tad[1] = pdev; 1834 break; 1835 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2: 1836 if (!mode_2ha) 1837 pvt->pci_tad[2] = pdev; 1838 break; 1839 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3: 1840 if (!mode_2ha) 1841 pvt->pci_tad[3] = pdev; 1842 break; 1843 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0: 1844 pvt->pci_ddrio = pdev; 1845 break; 1846 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1: 1847 pvt->pci_ha1 = pdev; 1848 break; 1849 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA: 1850 pvt->pci_ha1_ta = pdev; 1851 break; 1852 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0: 1853 if (mode_2ha) 1854 pvt->pci_tad[2] = pdev; 1855 break; 1856 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1: 1857 if (mode_2ha) 1858 pvt->pci_tad[3] = pdev; 1859 break; 1860 default: 1861 break; 1862 } 1863 1864 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n", 1865 sbridge_dev->bus, 1866 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), 1867 pdev); 1868 } 1869 1870 /* Check if everything were registered */ 1871 if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 || 1872 !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd) 1873 goto enodev; 1874 1875 for (i = 0; i < NUM_CHANNELS; i++) { 1876 if (!pvt->pci_tad[i]) 1877 goto enodev; 1878 } 1879 return 0; 1880 1881 enodev: 1882 sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); 1883 return -ENODEV; 1884 } 1885 1886 static int broadwell_mci_bind_devs(struct mem_ctl_info *mci, 1887 struct sbridge_dev *sbridge_dev) 1888 { 1889 struct sbridge_pvt *pvt = mci->pvt_info; 1890 struct pci_dev *pdev; 1891 int i; 1892 1893 /* there's only one device per system; not tied to any bus */ 1894 if (pvt->info.pci_vtd == NULL) 1895 /* result will be checked later */ 1896 pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL, 1897 PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC, 1898 NULL); 1899 1900 for (i = 0; i < sbridge_dev->n_devs; i++) { 1901 pdev = sbridge_dev->pdev[i]; 1902 if (!pdev) 1903 continue; 1904 1905 switch (pdev->device) { 1906 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0: 1907 pvt->pci_sad0 = pdev; 1908 break; 1909 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1: 1910 pvt->pci_sad1 = pdev; 1911 break; 1912 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0: 1913 pvt->pci_ha0 = pdev; 1914 break; 1915 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA: 1916 pvt->pci_ta = pdev; 1917 break; 1918 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL: 1919 pvt->pci_ras = pdev; 1920 break; 1921 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0: 1922 pvt->pci_tad[0] = pdev; 1923 break; 1924 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1: 1925 pvt->pci_tad[1] = pdev; 1926 break; 1927 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2: 1928 pvt->pci_tad[2] = pdev; 1929 break; 1930 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3: 1931 pvt->pci_tad[3] = pdev; 1932 break; 1933 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0: 1934 pvt->pci_ddrio = pdev; 1935 break; 1936 default: 1937 break; 1938 } 1939 1940 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n", 1941 sbridge_dev->bus, 1942 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), 1943 pdev); 1944 } 1945 1946 /* Check if everything were registered */ 1947 if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 || 1948 !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd) 1949 goto enodev; 1950 1951 for (i = 0; i < NUM_CHANNELS; i++) { 1952 if (!pvt->pci_tad[i]) 1953 goto enodev; 1954 } 1955 return 0; 1956 1957 enodev: 1958 sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); 1959 return -ENODEV; 1960 } 1961 1962 /**************************************************************************** 1963 Error check routines 1964 ****************************************************************************/ 1965 1966 /* 1967 * While Sandy Bridge has error count registers, SMI BIOS read values from 1968 * and resets the counters. So, they are not reliable for the OS to read 1969 * from them. So, we have no option but to just trust on whatever MCE is 1970 * telling us about the errors. 1971 */ 1972 static void sbridge_mce_output_error(struct mem_ctl_info *mci, 1973 const struct mce *m) 1974 { 1975 struct mem_ctl_info *new_mci; 1976 struct sbridge_pvt *pvt = mci->pvt_info; 1977 enum hw_event_mc_err_type tp_event; 1978 char *type, *optype, msg[256]; 1979 bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); 1980 bool overflow = GET_BITFIELD(m->status, 62, 62); 1981 bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); 1982 bool recoverable; 1983 u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52); 1984 u32 mscod = GET_BITFIELD(m->status, 16, 31); 1985 u32 errcode = GET_BITFIELD(m->status, 0, 15); 1986 u32 channel = GET_BITFIELD(m->status, 0, 3); 1987 u32 optypenum = GET_BITFIELD(m->status, 4, 6); 1988 long channel_mask, first_channel; 1989 u8 rank, socket; 1990 int rc, dimm; 1991 char *area_type = NULL; 1992 1993 if (pvt->info.type == IVY_BRIDGE) 1994 recoverable = true; 1995 else 1996 recoverable = GET_BITFIELD(m->status, 56, 56); 1997 1998 if (uncorrected_error) { 1999 if (ripv) { 2000 type = "FATAL"; 2001 tp_event = HW_EVENT_ERR_FATAL; 2002 } else { 2003 type = "NON_FATAL"; 2004 tp_event = HW_EVENT_ERR_UNCORRECTED; 2005 } 2006 } else { 2007 type = "CORRECTED"; 2008 tp_event = HW_EVENT_ERR_CORRECTED; 2009 } 2010 2011 /* 2012 * According with Table 15-9 of the Intel Architecture spec vol 3A, 2013 * memory errors should fit in this mask: 2014 * 000f 0000 1mmm cccc (binary) 2015 * where: 2016 * f = Correction Report Filtering Bit. If 1, subsequent errors 2017 * won't be shown 2018 * mmm = error type 2019 * cccc = channel 2020 * If the mask doesn't match, report an error to the parsing logic 2021 */ 2022 if (! ((errcode & 0xef80) == 0x80)) { 2023 optype = "Can't parse: it is not a mem"; 2024 } else { 2025 switch (optypenum) { 2026 case 0: 2027 optype = "generic undef request error"; 2028 break; 2029 case 1: 2030 optype = "memory read error"; 2031 break; 2032 case 2: 2033 optype = "memory write error"; 2034 break; 2035 case 3: 2036 optype = "addr/cmd error"; 2037 break; 2038 case 4: 2039 optype = "memory scrubbing error"; 2040 break; 2041 default: 2042 optype = "reserved"; 2043 break; 2044 } 2045 } 2046 2047 /* Only decode errors with an valid address (ADDRV) */ 2048 if (!GET_BITFIELD(m->status, 58, 58)) 2049 return; 2050 2051 rc = get_memory_error_data(mci, m->addr, &socket, 2052 &channel_mask, &rank, &area_type, msg); 2053 if (rc < 0) 2054 goto err_parsing; 2055 new_mci = get_mci_for_node_id(socket); 2056 if (!new_mci) { 2057 strcpy(msg, "Error: socket got corrupted!"); 2058 goto err_parsing; 2059 } 2060 mci = new_mci; 2061 pvt = mci->pvt_info; 2062 2063 first_channel = find_first_bit(&channel_mask, NUM_CHANNELS); 2064 2065 if (rank < 4) 2066 dimm = 0; 2067 else if (rank < 8) 2068 dimm = 1; 2069 else 2070 dimm = 2; 2071 2072 2073 /* 2074 * FIXME: On some memory configurations (mirror, lockstep), the 2075 * Memory Controller can't point the error to a single DIMM. The 2076 * EDAC core should be handling the channel mask, in order to point 2077 * to the group of dimm's where the error may be happening. 2078 */ 2079 if (!pvt->is_lockstep && !pvt->is_mirrored && !pvt->is_close_pg) 2080 channel = first_channel; 2081 2082 snprintf(msg, sizeof(msg), 2083 "%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d", 2084 overflow ? " OVERFLOW" : "", 2085 (uncorrected_error && recoverable) ? " recoverable" : "", 2086 area_type, 2087 mscod, errcode, 2088 socket, 2089 channel_mask, 2090 rank); 2091 2092 edac_dbg(0, "%s\n", msg); 2093 2094 /* FIXME: need support for channel mask */ 2095 2096 if (channel == CHANNEL_UNSPECIFIED) 2097 channel = -1; 2098 2099 /* Call the helper to output message */ 2100 edac_mc_handle_error(tp_event, mci, core_err_cnt, 2101 m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, 2102 channel, dimm, -1, 2103 optype, msg); 2104 return; 2105 err_parsing: 2106 edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, 2107 -1, -1, -1, 2108 msg, ""); 2109 2110 } 2111 2112 /* 2113 * sbridge_check_error Retrieve and process errors reported by the 2114 * hardware. Called by the Core module. 2115 */ 2116 static void sbridge_check_error(struct mem_ctl_info *mci) 2117 { 2118 struct sbridge_pvt *pvt = mci->pvt_info; 2119 int i; 2120 unsigned count = 0; 2121 struct mce *m; 2122 2123 /* 2124 * MCE first step: Copy all mce errors into a temporary buffer 2125 * We use a double buffering here, to reduce the risk of 2126 * loosing an error. 2127 */ 2128 smp_rmb(); 2129 count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in) 2130 % MCE_LOG_LEN; 2131 if (!count) 2132 return; 2133 2134 m = pvt->mce_outentry; 2135 if (pvt->mce_in + count > MCE_LOG_LEN) { 2136 unsigned l = MCE_LOG_LEN - pvt->mce_in; 2137 2138 memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l); 2139 smp_wmb(); 2140 pvt->mce_in = 0; 2141 count -= l; 2142 m += l; 2143 } 2144 memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count); 2145 smp_wmb(); 2146 pvt->mce_in += count; 2147 2148 smp_rmb(); 2149 if (pvt->mce_overrun) { 2150 sbridge_printk(KERN_ERR, "Lost %d memory errors\n", 2151 pvt->mce_overrun); 2152 smp_wmb(); 2153 pvt->mce_overrun = 0; 2154 } 2155 2156 /* 2157 * MCE second step: parse errors and display 2158 */ 2159 for (i = 0; i < count; i++) 2160 sbridge_mce_output_error(mci, &pvt->mce_outentry[i]); 2161 } 2162 2163 /* 2164 * sbridge_mce_check_error Replicates mcelog routine to get errors 2165 * This routine simply queues mcelog errors, and 2166 * return. The error itself should be handled later 2167 * by sbridge_check_error. 2168 * WARNING: As this routine should be called at NMI time, extra care should 2169 * be taken to avoid deadlocks, and to be as fast as possible. 2170 */ 2171 static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val, 2172 void *data) 2173 { 2174 struct mce *mce = (struct mce *)data; 2175 struct mem_ctl_info *mci; 2176 struct sbridge_pvt *pvt; 2177 char *type; 2178 2179 if (get_edac_report_status() == EDAC_REPORTING_DISABLED) 2180 return NOTIFY_DONE; 2181 2182 mci = get_mci_for_node_id(mce->socketid); 2183 if (!mci) 2184 return NOTIFY_BAD; 2185 pvt = mci->pvt_info; 2186 2187 /* 2188 * Just let mcelog handle it if the error is 2189 * outside the memory controller. A memory error 2190 * is indicated by bit 7 = 1 and bits = 8-11,13-15 = 0. 2191 * bit 12 has an special meaning. 2192 */ 2193 if ((mce->status & 0xefff) >> 7 != 1) 2194 return NOTIFY_DONE; 2195 2196 if (mce->mcgstatus & MCG_STATUS_MCIP) 2197 type = "Exception"; 2198 else 2199 type = "Event"; 2200 2201 sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n"); 2202 2203 sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx " 2204 "Bank %d: %016Lx\n", mce->extcpu, type, 2205 mce->mcgstatus, mce->bank, mce->status); 2206 sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc); 2207 sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr); 2208 sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc); 2209 2210 sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET " 2211 "%u APIC %x\n", mce->cpuvendor, mce->cpuid, 2212 mce->time, mce->socketid, mce->apicid); 2213 2214 smp_rmb(); 2215 if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) { 2216 smp_wmb(); 2217 pvt->mce_overrun++; 2218 return NOTIFY_DONE; 2219 } 2220 2221 /* Copy memory error at the ringbuffer */ 2222 memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce)); 2223 smp_wmb(); 2224 pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN; 2225 2226 /* Handle fatal errors immediately */ 2227 if (mce->mcgstatus & 1) 2228 sbridge_check_error(mci); 2229 2230 /* Advice mcelog that the error were handled */ 2231 return NOTIFY_STOP; 2232 } 2233 2234 static struct notifier_block sbridge_mce_dec = { 2235 .notifier_call = sbridge_mce_check_error, 2236 }; 2237 2238 /**************************************************************************** 2239 EDAC register/unregister logic 2240 ****************************************************************************/ 2241 2242 static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev) 2243 { 2244 struct mem_ctl_info *mci = sbridge_dev->mci; 2245 struct sbridge_pvt *pvt; 2246 2247 if (unlikely(!mci || !mci->pvt_info)) { 2248 edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev); 2249 2250 sbridge_printk(KERN_ERR, "Couldn't find mci handler\n"); 2251 return; 2252 } 2253 2254 pvt = mci->pvt_info; 2255 2256 edac_dbg(0, "MC: mci = %p, dev = %p\n", 2257 mci, &sbridge_dev->pdev[0]->dev); 2258 2259 /* Remove MC sysfs nodes */ 2260 edac_mc_del_mc(mci->pdev); 2261 2262 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name); 2263 kfree(mci->ctl_name); 2264 edac_mc_free(mci); 2265 sbridge_dev->mci = NULL; 2266 } 2267 2268 static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) 2269 { 2270 struct mem_ctl_info *mci; 2271 struct edac_mc_layer layers[2]; 2272 struct sbridge_pvt *pvt; 2273 struct pci_dev *pdev = sbridge_dev->pdev[0]; 2274 int rc; 2275 2276 /* Check the number of active and not disabled channels */ 2277 rc = check_if_ecc_is_active(sbridge_dev->bus, type); 2278 if (unlikely(rc < 0)) 2279 return rc; 2280 2281 /* allocate a new MC control structure */ 2282 layers[0].type = EDAC_MC_LAYER_CHANNEL; 2283 layers[0].size = NUM_CHANNELS; 2284 layers[0].is_virt_csrow = false; 2285 layers[1].type = EDAC_MC_LAYER_SLOT; 2286 layers[1].size = MAX_DIMMS; 2287 layers[1].is_virt_csrow = true; 2288 mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers, 2289 sizeof(*pvt)); 2290 2291 if (unlikely(!mci)) 2292 return -ENOMEM; 2293 2294 edac_dbg(0, "MC: mci = %p, dev = %p\n", 2295 mci, &pdev->dev); 2296 2297 pvt = mci->pvt_info; 2298 memset(pvt, 0, sizeof(*pvt)); 2299 2300 /* Associate sbridge_dev and mci for future usage */ 2301 pvt->sbridge_dev = sbridge_dev; 2302 sbridge_dev->mci = mci; 2303 2304 mci->mtype_cap = MEM_FLAG_DDR3; 2305 mci->edac_ctl_cap = EDAC_FLAG_NONE; 2306 mci->edac_cap = EDAC_FLAG_NONE; 2307 mci->mod_name = "sbridge_edac.c"; 2308 mci->mod_ver = SBRIDGE_REVISION; 2309 mci->dev_name = pci_name(pdev); 2310 mci->ctl_page_to_phys = NULL; 2311 2312 /* Set the function pointer to an actual operation function */ 2313 mci->edac_check = sbridge_check_error; 2314 2315 pvt->info.type = type; 2316 switch (type) { 2317 case IVY_BRIDGE: 2318 pvt->info.rankcfgr = IB_RANK_CFG_A; 2319 pvt->info.get_tolm = ibridge_get_tolm; 2320 pvt->info.get_tohm = ibridge_get_tohm; 2321 pvt->info.dram_rule = ibridge_dram_rule; 2322 pvt->info.get_memory_type = get_memory_type; 2323 pvt->info.get_node_id = get_node_id; 2324 pvt->info.rir_limit = rir_limit; 2325 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule); 2326 pvt->info.interleave_list = ibridge_interleave_list; 2327 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); 2328 pvt->info.interleave_pkg = ibridge_interleave_pkg; 2329 mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge Socket#%d", mci->mc_idx); 2330 2331 /* Store pci devices at mci for faster access */ 2332 rc = ibridge_mci_bind_devs(mci, sbridge_dev); 2333 if (unlikely(rc < 0)) 2334 goto fail0; 2335 break; 2336 case SANDY_BRIDGE: 2337 pvt->info.rankcfgr = SB_RANK_CFG_A; 2338 pvt->info.get_tolm = sbridge_get_tolm; 2339 pvt->info.get_tohm = sbridge_get_tohm; 2340 pvt->info.dram_rule = sbridge_dram_rule; 2341 pvt->info.get_memory_type = get_memory_type; 2342 pvt->info.get_node_id = get_node_id; 2343 pvt->info.rir_limit = rir_limit; 2344 pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule); 2345 pvt->info.interleave_list = sbridge_interleave_list; 2346 pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list); 2347 pvt->info.interleave_pkg = sbridge_interleave_pkg; 2348 mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx); 2349 2350 /* Store pci devices at mci for faster access */ 2351 rc = sbridge_mci_bind_devs(mci, sbridge_dev); 2352 if (unlikely(rc < 0)) 2353 goto fail0; 2354 break; 2355 case HASWELL: 2356 /* rankcfgr isn't used */ 2357 pvt->info.get_tolm = haswell_get_tolm; 2358 pvt->info.get_tohm = haswell_get_tohm; 2359 pvt->info.dram_rule = ibridge_dram_rule; 2360 pvt->info.get_memory_type = haswell_get_memory_type; 2361 pvt->info.get_node_id = haswell_get_node_id; 2362 pvt->info.rir_limit = haswell_rir_limit; 2363 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule); 2364 pvt->info.interleave_list = ibridge_interleave_list; 2365 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); 2366 pvt->info.interleave_pkg = ibridge_interleave_pkg; 2367 mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell Socket#%d", mci->mc_idx); 2368 2369 /* Store pci devices at mci for faster access */ 2370 rc = haswell_mci_bind_devs(mci, sbridge_dev); 2371 if (unlikely(rc < 0)) 2372 goto fail0; 2373 break; 2374 case BROADWELL: 2375 /* rankcfgr isn't used */ 2376 pvt->info.get_tolm = haswell_get_tolm; 2377 pvt->info.get_tohm = haswell_get_tohm; 2378 pvt->info.dram_rule = ibridge_dram_rule; 2379 pvt->info.get_memory_type = haswell_get_memory_type; 2380 pvt->info.get_node_id = haswell_get_node_id; 2381 pvt->info.rir_limit = haswell_rir_limit; 2382 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule); 2383 pvt->info.interleave_list = ibridge_interleave_list; 2384 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); 2385 pvt->info.interleave_pkg = ibridge_interleave_pkg; 2386 mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell Socket#%d", mci->mc_idx); 2387 2388 /* Store pci devices at mci for faster access */ 2389 rc = broadwell_mci_bind_devs(mci, sbridge_dev); 2390 if (unlikely(rc < 0)) 2391 goto fail0; 2392 break; 2393 } 2394 2395 /* Get dimm basic config and the memory layout */ 2396 get_dimm_config(mci); 2397 get_memory_layout(mci); 2398 2399 /* record ptr to the generic device */ 2400 mci->pdev = &pdev->dev; 2401 2402 /* add this new MC control structure to EDAC's list of MCs */ 2403 if (unlikely(edac_mc_add_mc(mci))) { 2404 edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); 2405 rc = -EINVAL; 2406 goto fail0; 2407 } 2408 2409 return 0; 2410 2411 fail0: 2412 kfree(mci->ctl_name); 2413 edac_mc_free(mci); 2414 sbridge_dev->mci = NULL; 2415 return rc; 2416 } 2417 2418 /* 2419 * sbridge_probe Probe for ONE instance of device to see if it is 2420 * present. 2421 * return: 2422 * 0 for FOUND a device 2423 * < 0 for error code 2424 */ 2425 2426 static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2427 { 2428 int rc = -ENODEV; 2429 u8 mc, num_mc = 0; 2430 struct sbridge_dev *sbridge_dev; 2431 enum type type = SANDY_BRIDGE; 2432 2433 /* get the pci devices we want to reserve for our use */ 2434 mutex_lock(&sbridge_edac_lock); 2435 2436 /* 2437 * All memory controllers are allocated at the first pass. 2438 */ 2439 if (unlikely(probed >= 1)) { 2440 mutex_unlock(&sbridge_edac_lock); 2441 return -ENODEV; 2442 } 2443 probed++; 2444 2445 switch (pdev->device) { 2446 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA: 2447 rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_ibridge_table); 2448 type = IVY_BRIDGE; 2449 break; 2450 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA: 2451 rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_sbridge_table); 2452 type = SANDY_BRIDGE; 2453 break; 2454 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0: 2455 rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_haswell_table); 2456 type = HASWELL; 2457 break; 2458 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0: 2459 rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_broadwell_table); 2460 type = BROADWELL; 2461 break; 2462 } 2463 if (unlikely(rc < 0)) 2464 goto fail0; 2465 mc = 0; 2466 2467 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { 2468 edac_dbg(0, "Registering MC#%d (%d of %d)\n", 2469 mc, mc + 1, num_mc); 2470 2471 sbridge_dev->mc = mc++; 2472 rc = sbridge_register_mci(sbridge_dev, type); 2473 if (unlikely(rc < 0)) 2474 goto fail1; 2475 } 2476 2477 sbridge_printk(KERN_INFO, "Driver loaded.\n"); 2478 2479 mutex_unlock(&sbridge_edac_lock); 2480 return 0; 2481 2482 fail1: 2483 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) 2484 sbridge_unregister_mci(sbridge_dev); 2485 2486 sbridge_put_all_devices(); 2487 fail0: 2488 mutex_unlock(&sbridge_edac_lock); 2489 return rc; 2490 } 2491 2492 /* 2493 * sbridge_remove destructor for one instance of device 2494 * 2495 */ 2496 static void sbridge_remove(struct pci_dev *pdev) 2497 { 2498 struct sbridge_dev *sbridge_dev; 2499 2500 edac_dbg(0, "\n"); 2501 2502 /* 2503 * we have a trouble here: pdev value for removal will be wrong, since 2504 * it will point to the X58 register used to detect that the machine 2505 * is a Nehalem or upper design. However, due to the way several PCI 2506 * devices are grouped together to provide MC functionality, we need 2507 * to use a different method for releasing the devices 2508 */ 2509 2510 mutex_lock(&sbridge_edac_lock); 2511 2512 if (unlikely(!probed)) { 2513 mutex_unlock(&sbridge_edac_lock); 2514 return; 2515 } 2516 2517 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) 2518 sbridge_unregister_mci(sbridge_dev); 2519 2520 /* Release PCI resources */ 2521 sbridge_put_all_devices(); 2522 2523 probed--; 2524 2525 mutex_unlock(&sbridge_edac_lock); 2526 } 2527 2528 MODULE_DEVICE_TABLE(pci, sbridge_pci_tbl); 2529 2530 /* 2531 * sbridge_driver pci_driver structure for this module 2532 * 2533 */ 2534 static struct pci_driver sbridge_driver = { 2535 .name = "sbridge_edac", 2536 .probe = sbridge_probe, 2537 .remove = sbridge_remove, 2538 .id_table = sbridge_pci_tbl, 2539 }; 2540 2541 /* 2542 * sbridge_init Module entry function 2543 * Try to initialize this module for its devices 2544 */ 2545 static int __init sbridge_init(void) 2546 { 2547 int pci_rc; 2548 2549 edac_dbg(2, "\n"); 2550 2551 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 2552 opstate_init(); 2553 2554 pci_rc = pci_register_driver(&sbridge_driver); 2555 if (pci_rc >= 0) { 2556 mce_register_decode_chain(&sbridge_mce_dec); 2557 if (get_edac_report_status() == EDAC_REPORTING_DISABLED) 2558 sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n"); 2559 return 0; 2560 } 2561 2562 sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n", 2563 pci_rc); 2564 2565 return pci_rc; 2566 } 2567 2568 /* 2569 * sbridge_exit() Module exit function 2570 * Unregister the driver 2571 */ 2572 static void __exit sbridge_exit(void) 2573 { 2574 edac_dbg(2, "\n"); 2575 pci_unregister_driver(&sbridge_driver); 2576 mce_unregister_decode_chain(&sbridge_mce_dec); 2577 } 2578 2579 module_init(sbridge_init); 2580 module_exit(sbridge_exit); 2581 2582 module_param(edac_op_state, int, 0444); 2583 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 2584 2585 MODULE_LICENSE("GPL"); 2586 MODULE_AUTHOR("Mauro Carvalho Chehab"); 2587 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)"); 2588 MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - " 2589 SBRIDGE_REVISION); 2590