/*
 * Intel D82875P Memory Controller kernel module
 * (C) 2003 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Thayne Harbaugh
 * Contributors:
 *	Wang Zhenyu at intel.com
 *
 * $Id: edac_i82875p.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
 *
 * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include "edac_core.h"

#define I82875P_REVISION	" Ver: 2.0.2"
#define EDAC_MOD_STR		"i82875p_edac"

#define i82875p_printk(level, fmt, arg...) \
	edac_printk(level, "i82875p", fmt, ##arg)

#define i82875p_mc_printk(mci, level, fmt, arg...) \
	edac_mc_chipset_printk(mci, level, "i82875p", fmt, ##arg)

#ifndef PCI_DEVICE_ID_INTEL_82875_0
#define PCI_DEVICE_ID_INTEL_82875_0	0x2578
#endif				/* PCI_DEVICE_ID_INTEL_82875_0 */

#ifndef PCI_DEVICE_ID_INTEL_82875_6
#define PCI_DEVICE_ID_INTEL_82875_6	0x257e
#endif				/* PCI_DEVICE_ID_INTEL_82875_6 */

/* four csrows in dual channel, eight in single channel */
#define I82875P_NR_DIMMS		8
#define I82875P_NR_CSROWS(nr_chans)	(I82875P_NR_DIMMS / (nr_chans))

/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
#define I82875P_EAP		0x58	/* Error Address Pointer (32b)
					 *
					 * 31:12 block address
					 * 11:0  reserved
					 */

#define I82875P_DERRSYN		0x5c	/* DRAM Error Syndrome (8b)
					 *
					 *  7:0  DRAM ECC Syndrome
					 */

#define I82875P_DES		0x5d	/* DRAM Error Status (8b)
					 *
					 *  7:1  reserved
					 *  0    Error channel 0/1
					 */

#define I82875P_ERRSTS		0xc8	/* Error Status Register (16b)
					 *
					 * 15:10 reserved
					 *  9    non-DRAM lock error (ndlock)
					 *  8    Sftwr Generated SMI
					 *  7    ECC UE
					 *  6    reserved
					 *  5    MCH detects unimplemented cycle
					 *  4    AGP access outside GA
					 *  3    Invalid AGP access
					 *  2    Invalid GA translation table
					 *  1    Unsupported AGP command
					 *  0    ECC CE
					 */

#define I82875P_ERRCMD		0xca	/* Error Command (16b)
					 *
					 * 15:10 reserved
					 *  9    SERR on non-DRAM lock
					 *  8    SERR on ECC UE
					 *  7    SERR on ECC CE
					 *  6    target abort on high exception
					 *  5    detect unimplemented cyc
					 *  4    AGP access outside of GA
					 *  3    SERR on invalid AGP access
					 *  2    invalid translation table
					 *  1    SERR on unsupported AGP command
					 *  0    reserved
					 */

/* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */
#define I82875P_PCICMD6		0x04	/* PCI Command Register (16b)
					 *
					 * 15:10 reserved
					 *  9    fast back-to-back - ro 0
					 *  8    SERR enable - ro 0
					 *  7    addr/data stepping - ro 0
					 *  6    parity err enable - ro 0
					 *  5    VGA palette snoop - ro 0
					 *  4    mem wr & invalidate - ro 0
					 *  3    special cycle - ro 0
					 *  2    bus master - ro 0
					 *  1    mem access dev6 - 0(dis),1(en)
					 *  0    IO access dev3 - 0(dis),1(en)
					 */

#define I82875P_BAR6		0x10	/* Mem Delays Base ADDR Reg (32b)
					 *
					 * 31:12 mem base addr [31:12]
					 * 11:4  address mask - ro 0
					 *  3    prefetchable - ro 0(non),1(pre)
					 *  2:1  mem type - ro 0
					 *  0    mem space - ro 0
					 */

/* Intel 82875p MMIO register space - device 0 function 0 - MMR space */

#define I82875P_DRB_SHIFT 26	/* 64MiB grain */
#define I82875P_DRB		0x00	/* DRAM Row Boundary (8b x 8)
					 *
					 *  7    reserved
					 *  6:0  64MiB row boundary addr
					 */

#define I82875P_DRA		0x10	/* DRAM Row Attribute (4b x 8)
					 *
					 *  7    reserved
					 *  6:4  row attr row 1
					 *  3    reserved
					 *  2:0  row attr row 0
					 *
					 *  000 = 4KiB
					 *  001 = 8KiB
					 *  010 = 16KiB
					 *  011 = 32KiB
					 */
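
/* Each I82875P_DRA byte describes a pair of rows, one nibble per row; a
 * hypothetical value of 0x21, for example, would mean row 0 uses 8KiB pages
 * (001) and row 1 uses 16KiB pages (010).  This driver does not read DRA;
 * the layout is documented here only for reference.
 */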

#define I82875P_DRC		0x68	/* DRAM Controller Mode (32b)
					 *
					 * 31:30 reserved
					 * 29    init complete
					 * 28:23 reserved
					 * 22:21 nr chan 00=1,01=2
					 * 20    reserved
					 * 19:18 Data Integ Mode 00=none,01=ecc
					 * 17:11 reserved
					 * 10:8  refresh mode
					 *  7    reserved
					 *  6:4  mode select
					 *  3:2  reserved
					 *  1:0  DRAM type 01=DDR
					 */

enum i82875p_chips {
	I82875P = 0,
};

struct i82875p_pvt {
	struct pci_dev *ovrfl_pdev;
	void __iomem *ovrfl_window;
};

struct i82875p_dev_info {
	const char *ctl_name;
};

struct i82875p_error_info {
	u16 errsts;
	u32 eap;
	u8 des;
	u8 derrsyn;
	u16 errsts2;
};

static const struct i82875p_dev_info i82875p_devs[] = {
	[I82875P] = {
		.ctl_name = "i82875p"},
};

static struct pci_dev *mci_pdev;	/* init dev: in case the AGP code has
					 * already registered a driver
					 */

static struct edac_pci_ctl_info *i82875p_pci;
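
/*
 * Snapshot the MCH error registers.  The 0x0081 mask used below selects
 * ERRSTS bit 7 (ECC UE) and bit 0 (ECC CE); see the I82875P_ERRSTS layout
 * documented above.
 */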
static void i82875p_get_error_info(struct mem_ctl_info *mci,
				   struct i82875p_error_info *info)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(mci->pdev);

	/*
	 * This is a mess because there is no atomic way to read all the
	 * registers at once and the registers can transition from CE being
	 * overwritten by UE.
	 */
	pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts);

	if (!(info->errsts & 0x0081))
		return;

	pci_read_config_dword(pdev, I82875P_EAP, &info->eap);
	pci_read_config_byte(pdev, I82875P_DES, &info->des);
	pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn);
	pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts2);

	/*
	 * If the error status is the same for both reads, the first set of
	 * reads is valid.  If it changed, a CE was overwritten (its info is
	 * lost) and the second set of reads is valid and should describe
	 * the UE.
	 */
	if ((info->errsts ^ info->errsts2) & 0x0081) {
		pci_read_config_dword(pdev, I82875P_EAP, &info->eap);
		pci_read_config_byte(pdev, I82875P_DES, &info->des);
		pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn);
	}

	pci_write_bits16(pdev, I82875P_ERRSTS, 0x0081, 0x0081);
}

static int i82875p_process_error_info(struct mem_ctl_info *mci,
				      struct i82875p_error_info *info,
				      int handle_errors)
{
	int row, multi_chan;

	multi_chan = mci->csrows[0]->nr_channels - 1;

	if (!(info->errsts & 0x0081))
		return 0;

	if (!handle_errors)
		return 1;

	if ((info->errsts ^ info->errsts2) & 0x0081) {
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
				     -1, -1, -1,
				     "UE overwrote CE", "");
		info->errsts = info->errsts2;
	}

	info->eap >>= PAGE_SHIFT;
	row = edac_mc_find_csrow_by_page(mci, info->eap);

	if (info->errsts & 0x0080)
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     info->eap, 0, 0,
				     row, -1, -1,
				     "i82875p UE", "");
	else
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     info->eap, 0, info->derrsyn,
				     row, multi_chan ? (info->des & 0x1) : 0,
				     -1, "i82875p CE", "");

	return 1;
}

static void i82875p_check(struct mem_ctl_info *mci)
{
	struct i82875p_error_info info;

	edac_dbg(1, "MC%d\n", mci->mc_idx);
	i82875p_get_error_info(mci, &info);
	i82875p_process_error_info(mci, &info, 1);
}

/* Return 0 on success or 1 on failure. */
static int i82875p_setup_overfl_dev(struct pci_dev *pdev,
				    struct pci_dev **ovrfl_pdev,
				    void __iomem **ovrfl_window)
{
	struct pci_dev *dev;
	void __iomem *window;

	*ovrfl_pdev = NULL;
	*ovrfl_window = NULL;
	dev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);

	if (dev == NULL) {
		/* Intel tells BIOS developers to hide device 6 which
		 * configures the overflow device access containing
		 * the DRBs - this is where we expose device 6.
		 * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
		 */
		pci_write_bits8(pdev, 0xf4, 0x2, 0x2);
		dev = pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));

		if (dev == NULL)
			return 1;

		pci_bus_assign_resources(dev->bus);
		pci_bus_add_device(dev);
	}

	*ovrfl_pdev = dev;

	if (pci_enable_device(dev)) {
		i82875p_printk(KERN_ERR, "%s(): Failed to enable overflow "
			       "device\n", __func__);
		return 1;
	}

	if (pci_request_regions(dev, pci_name(dev))) {
#ifdef CORRECT_BIOS
		goto fail0;
#endif
	}

	/* cache is irrelevant for PCI bus reads/writes */
	window = pci_ioremap_bar(dev, 0);
	if (window == NULL) {
		i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n",
			       __func__);
		goto fail1;
	}

	*ovrfl_window = window;
	return 0;

fail1:
	pci_release_regions(dev);

#ifdef CORRECT_BIOS
fail0:
	pci_disable_device(dev);
#endif
	/* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
	return 1;
}

/* Return 1 if dual channel mode is active.  Else return 0. */
static inline int dual_channel_active(u32 drc)
{
	return (drc >> 21) & 0x1;
}

static void i82875p_init_csrows(struct mem_ctl_info *mci,
				struct pci_dev *pdev,
				void __iomem *ovrfl_window, u32 drc)
{
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	unsigned nr_chans = dual_channel_active(drc) + 1;
	unsigned long last_cumul_size;
	u8 value;
	u32 drc_ddim;		/* DRAM Data Integrity Mode 0=none,1=ecc */
	u32 cumul_size, nr_pages;
	int index, j;

	drc_ddim = (drc >> 18) & 0x1;
	last_cumul_size = 0;

	/* The DRAM row boundary (DRB) reg values are boundary addresses
	 * for each DRAM row with a granularity of 32 or 64MB (single/dual
	 * channel operation).  DRB regs are cumulative; therefore DRB7 will
	 * contain the total memory contained in all eight rows.
	 */
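	/* Worked example (hypothetical DRB values, assuming 4KiB pages so
	 * PAGE_SHIFT == 12): with I82875P_DRB_SHIFT == 26, a DRB byte of
	 * 0x04 marks a cumulative boundary of 4 * 64MiB = 256MiB, i.e.
	 * 0x04 << (26 - 12) = 0x10000 pages.  If DRB0 == 0x04 and
	 * DRB1 == 0x08, row 0 covers pages 0x0-0xffff and row 1 covers
	 * pages 0x10000-0x1ffff; a row whose boundary equals the previous
	 * one is unpopulated and is skipped below.
	 */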

	for (index = 0; index < mci->nr_csrows; index++) {
		csrow = mci->csrows[index];

		value = readb(ovrfl_window + I82875P_DRB + index);
		cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
		edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
		if (cumul_size == last_cumul_size)
			continue;	/* not populated */

		csrow->first_page = last_cumul_size;
		csrow->last_page = cumul_size - 1;
		nr_pages = cumul_size - last_cumul_size;
		last_cumul_size = cumul_size;

		for (j = 0; j < nr_chans; j++) {
			dimm = csrow->channels[j]->dimm;

			dimm->nr_pages = nr_pages / nr_chans;
			dimm->grain = 1 << 12;	/* I82875P_EAP has 4KiB resolution */
			dimm->mtype = MEM_DDR;
			dimm->dtype = DEV_UNKNOWN;
			dimm->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
		}
	}
}
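
/*
 * Probe a single MCH instance: map the hidden overflow device (device 6),
 * read DRC to determine the channel count, allocate an MCI with csrow and
 * channel layers, populate the csrows from the DRB registers, clear any
 * stale error status, and register the controller with the EDAC core.
 */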
static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
{
	int rc = -ENODEV;
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct i82875p_pvt *pvt;
	struct pci_dev *ovrfl_pdev;
	void __iomem *ovrfl_window;
	u32 drc;
	u32 nr_chans;
	struct i82875p_error_info discard;

	edac_dbg(0, "\n");

	if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window))
		return -ENODEV;
	drc = readl(ovrfl_window + I82875P_DRC);
	nr_chans = dual_channel_active(drc) + 1;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = I82875P_NR_CSROWS(nr_chans);
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = nr_chans;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (!mci) {
		rc = -ENOMEM;
		goto fail0;
	}

	edac_dbg(3, "init mci\n");
	mci->pdev = &pdev->dev;
	mci->mtype_cap = MEM_FLAG_DDR;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_UNKNOWN;
	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = I82875P_REVISION;
	mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->edac_check = i82875p_check;
	mci->ctl_page_to_phys = NULL;
	edac_dbg(3, "init pvt\n");
	pvt = (struct i82875p_pvt *)mci->pvt_info;
	pvt->ovrfl_pdev = ovrfl_pdev;
	pvt->ovrfl_window = ovrfl_window;
	i82875p_init_csrows(mci, pdev, ovrfl_window, drc);
	i82875p_get_error_info(mci, &discard);	/* clear counters */

	/* Here we assume that we will never see multiple instances of this
	 * type of memory controller.  The ID is therefore hardcoded to 0.
	 */
	if (edac_mc_add_mc(mci)) {
		edac_dbg(3, "failed edac_mc_add_mc()\n");
		goto fail1;
	}

	/* allocating generic PCI control info */
	i82875p_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!i82875p_pci) {
		printk(KERN_WARNING
		       "%s(): Unable to create PCI control\n",
		       __func__);
		printk(KERN_WARNING
		       "%s(): PCI error report via EDAC not setup\n",
		       __func__);
	}

	/* get this far and it's successful */
	edac_dbg(3, "success\n");
	return 0;

fail1:
	edac_mc_free(mci);

fail0:
	iounmap(ovrfl_window);
	pci_release_regions(ovrfl_pdev);

	pci_disable_device(ovrfl_pdev);
	/* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
	return rc;
}

/* returns count (>= 0), or negative on error */
static int i82875p_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	int rc;

	edac_dbg(0, "\n");
	i82875p_printk(KERN_INFO, "i82875p init one\n");

	if (pci_enable_device(pdev) < 0)
		return -EIO;

	rc = i82875p_probe1(pdev, ent->driver_data);

	if (mci_pdev == NULL)
		mci_pdev = pci_dev_get(pdev);

	return rc;
}

static void i82875p_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct i82875p_pvt *pvt = NULL;

	edac_dbg(0, "\n");

	if (i82875p_pci)
		edac_pci_release_generic_ctl(i82875p_pci);

	mci = edac_mc_del_mc(&pdev->dev);
	if (mci == NULL)
		return;

	pvt = (struct i82875p_pvt *)mci->pvt_info;

	if (pvt->ovrfl_window)
		iounmap(pvt->ovrfl_window);

	if (pvt->ovrfl_pdev) {
#ifdef CORRECT_BIOS
		pci_release_regions(pvt->ovrfl_pdev);
#endif				/* CORRECT_BIOS */
		pci_disable_device(pvt->ovrfl_pdev);
		pci_dev_put(pvt->ovrfl_pdev);
	}

	edac_mc_free(mci);
}

static const struct pci_device_id i82875p_pci_tbl[] = {
	{
	 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 I82875P},
	{
	 0,
	 }			/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl);

static struct pci_driver i82875p_driver = {
	.name = EDAC_MOD_STR,
	.probe = i82875p_init_one,
	.remove = i82875p_remove_one,
	.id_table = i82875p_pci_tbl,
};

static int __init i82875p_init(void)
{
	int pci_rc;

	edac_dbg(3, "\n");

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	pci_rc = pci_register_driver(&i82875p_driver);

	if (pci_rc < 0)
		goto fail0;

	if (mci_pdev == NULL) {
		mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					  PCI_DEVICE_ID_INTEL_82875_0, NULL);

		if (!mci_pdev) {
			edac_dbg(0, "875p pci_get_device fail\n");
			pci_rc = -ENODEV;
			goto fail1;
		}

		pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl);

		if (pci_rc < 0) {
			edac_dbg(0, "875p init fail\n");
			pci_rc = -ENODEV;
			goto fail1;
		}
	}

	return 0;

fail1:
	pci_unregister_driver(&i82875p_driver);

fail0:
	pci_dev_put(mci_pdev);
	return pci_rc;
}

static void __exit i82875p_exit(void)
{
	edac_dbg(3, "\n");

	i82875p_remove_one(mci_pdev);
	pci_dev_put(mci_pdev);

	pci_unregister_driver(&i82875p_driver);
}

module_init(i82875p_init);
module_exit(i82875p_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers");

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");