/*
 * Freescale MPC85xx Memory Controller kernel module
 *
 * Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc.
 *
 * Author: Dave Jiang <djiang@mvista.com>
 *
 * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/edac.h>
#include <linux/smp.h>
#include <linux/gfp.h>
#include <linux/fsl/edac.h>

#include <linux/of_platform.h>
#include <linux/of_device.h>
#include "edac_module.h"
#include "edac_core.h"
#include "mpc85xx_edac.h"

static int edac_dev_idx;
#ifdef CONFIG_PCI
static int edac_pci_idx;
#endif
static int edac_mc_idx;

static u32 orig_ddr_err_disable;
static u32 orig_ddr_err_sbe;

/*
 * PCI Err defines
 */
#ifdef CONFIG_PCI
static u32 orig_pci_err_cap_dr;
static u32 orig_pci_err_en;
#endif

static u32 orig_l2_err_disable;
#ifdef CONFIG_FSL_SOC_BOOKE
static u32 orig_hid1[2];
#endif

/************************ MC SYSFS parts ***********************************/

#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

static ssize_t mpc85xx_mc_inject_data_hi_show(struct device *dev,
					      struct device_attribute *mattr,
					      char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	return sprintf(data, "0x%08x",
		       in_be32(pdata->mc_vbase +
			       MPC85XX_MC_DATA_ERR_INJECT_HI));
}

static ssize_t mpc85xx_mc_inject_data_lo_show(struct device *dev,
					      struct device_attribute *mattr,
					      char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	return sprintf(data, "0x%08x",
		       in_be32(pdata->mc_vbase +
			       MPC85XX_MC_DATA_ERR_INJECT_LO));
}

static ssize_t mpc85xx_mc_inject_ctrl_show(struct device *dev,
					   struct device_attribute *mattr,
					   char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	return sprintf(data, "0x%08x",
		       in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT));
}

static ssize_t mpc85xx_mc_inject_data_hi_store(struct device *dev,
					       struct device_attribute *mattr,
					       const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI,
			 simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

static ssize_t mpc85xx_mc_inject_data_lo_store(struct device *dev,
					       struct device_attribute *mattr,
					       const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO,
			 simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

static ssize_t mpc85xx_mc_inject_ctrl_store(struct device *dev,
					    struct device_attribute *mattr,
					    const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT,
			 simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
	    mpc85xx_mc_inject_data_hi_show, mpc85xx_mc_inject_data_hi_store);
DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
	    mpc85xx_mc_inject_data_lo_show, mpc85xx_mc_inject_data_lo_store);
DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
	    mpc85xx_mc_inject_ctrl_show, mpc85xx_mc_inject_ctrl_store);

static struct attribute *mpc85xx_dev_attrs[] = {
	&dev_attr_inject_data_hi.attr,
	&dev_attr_inject_data_lo.attr,
	&dev_attr_inject_ctrl.attr,
	NULL
};

ATTRIBUTE_GROUPS(mpc85xx_dev);
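
/*
 * Illustrative usage sketch (not taken from the reference manual): once the
 * attribute group above is registered with the memory controller, the
 * injection registers can typically be driven from user space, e.g.
 *
 *   echo 0x00000001 > /sys/devices/system/edac/mc/mc0/inject_data_lo
 *   echo <mask>     > /sys/devices/system/edac/mc/mc0/inject_ctrl
 *
 * The exact sysfs path and the inject_ctrl bit layout depend on the kernel
 * version and the DDR controller; consult the SoC manual for the
 * DATA_ERR_INJECT_HI/LO and ECC_ERR_INJECT register definitions before use.
 */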

/**************************** PCI Err device ***************************/
#ifdef CONFIG_PCI

static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci)
{
	struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);

	/* master aborts can happen during PCI config cycles */
	if (!(err_detect & ~(PCI_EDE_MULTI_ERR | PCI_EDE_MST_ABRT))) {
		out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
		return;
	}

	printk(KERN_ERR "PCI error(s) detected\n");
	printk(KERN_ERR "PCI/X ERR_DR register: %#08x\n", err_detect);

	printk(KERN_ERR "PCI/X ERR_ATTRIB register: %#08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ATTRIB));
	printk(KERN_ERR "PCI/X ERR_ADDR register: %#08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR));
	printk(KERN_ERR "PCI/X ERR_EXT_ADDR register: %#08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EXT_ADDR));
	printk(KERN_ERR "PCI/X ERR_DL register: %#08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DL));
	printk(KERN_ERR "PCI/X ERR_DH register: %#08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DH));

	/* clear error bits */
	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);

	if (err_detect & PCI_EDE_PERR_MASK)
		edac_pci_handle_pe(pci, pci->ctl_name);

	if ((err_detect & ~PCI_EDE_MULTI_ERR) & ~PCI_EDE_PERR_MASK)
		edac_pci_handle_npe(pci, pci->ctl_name);
}

static void mpc85xx_pcie_check(struct edac_pci_ctl_info *pci)
{
	struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);

	pr_err("PCIe error(s) detected\n");
	pr_err("PCIe ERR_DR register: 0x%08x\n", err_detect);
	pr_err("PCIe ERR_CAP_STAT register: 0x%08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_GAS_TIMR));
	pr_err("PCIe ERR_CAP_R0 register: 0x%08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R0));
	pr_err("PCIe ERR_CAP_R1 register: 0x%08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R1));
	pr_err("PCIe ERR_CAP_R2 register: 0x%08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R2));
	pr_err("PCIe ERR_CAP_R3 register: 0x%08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R3));

	/* clear error bits */
	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
}

static int mpc85xx_pcie_find_capability(struct device_node *np)
{
	struct pci_controller *hose;

	if (!np)
		return -EINVAL;

	hose = pci_find_hose_for_OF_device(np);

	return early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
}

static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
{
	struct edac_pci_ctl_info *pci = dev_id;
	struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);

	if (!err_detect)
		return IRQ_NONE;

	if (pdata->is_pcie)
		mpc85xx_pcie_check(pci);
	else
		mpc85xx_pci_check(pci);

	return IRQ_HANDLED;
}

static int mpc85xx_pci_err_probe(struct platform_device *op)
{
	struct edac_pci_ctl_info *pci;
	struct mpc85xx_pci_pdata *pdata;
	struct mpc85xx_edac_pci_plat_data *plat_data;
	struct device_node *of_node;
	struct resource r;
	int res = 0;

	if (!devres_open_group(&op->dev, mpc85xx_pci_err_probe, GFP_KERNEL))
		return -ENOMEM;

	pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mpc85xx_pci_err");
	if (!pci) {
		/* release the devres group opened above before bailing out */
		devres_release_group(&op->dev, mpc85xx_pci_err_probe);
		return -ENOMEM;
	}

	/* make sure error reporting method is sane */
	switch (edac_op_state) {
	case EDAC_OPSTATE_POLL:
	case EDAC_OPSTATE_INT:
		break;
	default:
		edac_op_state = EDAC_OPSTATE_INT;
		break;
	}

	pdata = pci->pvt_info;
	pdata->name = "mpc85xx_pci_err";
	pdata->irq = NO_IRQ;

	plat_data = op->dev.platform_data;
	if (!plat_data) {
		dev_err(&op->dev, "no platform data");
		res = -ENXIO;
		goto err;
	}
	of_node = plat_data->of_node;

	if (mpc85xx_pcie_find_capability(of_node) > 0)
		pdata->is_pcie = true;

	dev_set_drvdata(&op->dev, pci);
	pci->dev = &op->dev;
	pci->mod_name = EDAC_MOD_STR;
	pci->ctl_name = pdata->name;
	pci->dev_name = dev_name(&op->dev);

	if (edac_op_state == EDAC_OPSTATE_POLL) {
		if (pdata->is_pcie)
			pci->edac_check = mpc85xx_pcie_check;
		else
			pci->edac_check = mpc85xx_pci_check;
	}

	pdata->edac_idx = edac_pci_idx++;

	res = of_address_to_resource(of_node, 0, &r);
	if (res) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "PCI err regs\n", __func__);
		goto err;
	}

	/* we only need the error registers */
	r.start += 0xe00;

	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->pci_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
	if (!pdata->pci_vbase) {
		printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	if (pdata->is_pcie) {
		orig_pci_err_cap_dr =
		    in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR);
		out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR, ~0);
		orig_pci_err_en =
		    in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
		out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, 0);
	} else {
		orig_pci_err_cap_dr =
		    in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR);

		/* PCI master abort is expected during config cycles */
		out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40);

		orig_pci_err_en =
		    in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);

		/* disable master abort reporting */
		out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40);
	}

	/* clear error bits */
	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);

	if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
		edac_dbg(3, "failed edac_pci_add_device()\n");
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = irq_of_parse_and_map(of_node, 0);
		res = devm_request_irq(&op->dev, pdata->irq,
				       mpc85xx_pci_isr,
				       IRQF_SHARED,
				       "[EDAC] PCI err", pci);
		if (res < 0) {
			printk(KERN_ERR
			       "%s: Unable to request irq %d for "
			       "MPC85xx PCI err\n", __func__, pdata->irq);
			irq_dispose_mapping(pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
		       pdata->irq);
	}

	if (pdata->is_pcie) {
		/*
		 * Enable all PCIe error interrupts and error detection except
		 * the invalid PEX_CONFIG_ADDR/PEX_CONFIG_DATA access interrupt
		 * enable bit and the corresponding detection enable bit. The
		 * PCIe bus code issues some invalid PEX_CONFIG_ADDR/
		 * PEX_CONFIG_DATA accesses while enumerating and configuring
		 * devices at boot, which would otherwise flood the log with
		 * spurious error reports, so leave that class of detection
		 * disabled.
		 */
		out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0
			 & ~PEX_ERR_ICCAIE_EN_BIT);
		out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR, 0
			 | PEX_ERR_ICCAD_DISR_BIT);
	}

	devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
	edac_dbg(3, "success\n");
	printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");

	return 0;

err2:
	edac_pci_del_device(&op->dev);
err:
	edac_pci_free_ctl_info(pci);
	devres_release_group(&op->dev, mpc85xx_pci_err_probe);
	return res;
}

static const struct platform_device_id mpc85xx_pci_err_match[] = {
	{
		.name = "mpc85xx-pci-edac"
	},
	{}
};

static struct platform_driver mpc85xx_pci_err_driver = {
	.probe = mpc85xx_pci_err_probe,
	.id_table = mpc85xx_pci_err_match,
	.driver = {
		.name = "mpc85xx_pci_err",
		.suppress_bind_attrs = true,
	},
};
#endif /* CONFIG_PCI */

/**************************** L2 Err device ***************************/

/************************ L2 SYSFS parts ***********************************/

static ssize_t mpc85xx_l2_inject_data_hi_show(struct edac_device_ctl_info
					      *edac_dev, char *data)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	return sprintf(data, "0x%08x",
		       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI));
}

static ssize_t mpc85xx_l2_inject_data_lo_show(struct edac_device_ctl_info
					      *edac_dev, char *data)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	return sprintf(data, "0x%08x",
		       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO));
}

static ssize_t mpc85xx_l2_inject_ctrl_show(struct edac_device_ctl_info
					   *edac_dev, char *data)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	return sprintf(data, "0x%08x",
		       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL));
}

static ssize_t mpc85xx_l2_inject_data_hi_store(struct edac_device_ctl_info
					       *edac_dev, const char *data,
					       size_t count)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI,
			 simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

static ssize_t mpc85xx_l2_inject_data_lo_store(struct edac_device_ctl_info
					       *edac_dev, const char *data,
					       size_t count)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO,
			 simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

static ssize_t mpc85xx_l2_inject_ctrl_store(struct edac_device_ctl_info
					    *edac_dev, const char *data,
					    size_t count)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL,
			 simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

static struct edac_dev_sysfs_attribute mpc85xx_l2_sysfs_attributes[] = {
	{
	 .attr = {
		  .name = "inject_data_hi",
		  .mode = (S_IRUGO | S_IWUSR)
	 },
	 .show = mpc85xx_l2_inject_data_hi_show,
	 .store = mpc85xx_l2_inject_data_hi_store},
	{
	 .attr = {
		  .name = "inject_data_lo",
		  .mode = (S_IRUGO | S_IWUSR)
	 },
	 .show = mpc85xx_l2_inject_data_lo_show,
	 .store = mpc85xx_l2_inject_data_lo_store},
	{
	 .attr = {
		  .name = "inject_ctrl",
		  .mode = (S_IRUGO | S_IWUSR)
	 },
	 .show = mpc85xx_l2_inject_ctrl_show,
	 .store = mpc85xx_l2_inject_ctrl_store},

	/* End of list */
	{
	 .attr = {.name = NULL}
	}
};

static void mpc85xx_set_l2_sysfs_attributes(struct edac_device_ctl_info
					    *edac_dev)
{
	edac_dev->sysfs_attributes = mpc85xx_l2_sysfs_attributes;
}

/***************************** L2 ops ***********************************/

static void mpc85xx_l2_check(struct edac_device_ctl_info *edac_dev)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);

	if (!(err_detect & L2_EDE_MASK))
		return;

	printk(KERN_ERR "ECC Error in CPU L2 cache\n");
	printk(KERN_ERR "L2 Error Detect Register: 0x%08x\n", err_detect);
	printk(KERN_ERR "L2 Error Capture Data High Register: 0x%08x\n",
	       in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATAHI));
	printk(KERN_ERR "L2 Error Capture Data Lo Register: 0x%08x\n",
	       in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATALO));
	printk(KERN_ERR "L2 Error Syndrome Register: 0x%08x\n",
	       in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTECC));
	printk(KERN_ERR "L2 Error Attributes Capture Register: 0x%08x\n",
	       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRATTR));
	printk(KERN_ERR "L2 Error Address Capture Register: 0x%08x\n",
	       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRADDR));

	/* clear error detect register */
	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, err_detect);

	if (err_detect & L2_EDE_CE_MASK)
		edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);

	if (err_detect & L2_EDE_UE_MASK)
		edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}

static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id)
{
	struct edac_device_ctl_info *edac_dev = dev_id;
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);

	if (!(err_detect & L2_EDE_MASK))
		return IRQ_NONE;

	mpc85xx_l2_check(edac_dev);

	return IRQ_HANDLED;
}

static int mpc85xx_l2_err_probe(struct platform_device *op)
{
	struct edac_device_ctl_info *edac_dev;
	struct mpc85xx_l2_pdata *pdata;
	struct resource r;
	int res;

	if (!devres_open_group(&op->dev, mpc85xx_l2_err_probe, GFP_KERNEL))
		return -ENOMEM;

	edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
					      "cpu", 1, "L", 1, 2, NULL, 0,
					      edac_dev_idx);
	if (!edac_dev) {
		devres_release_group(&op->dev, mpc85xx_l2_err_probe);
		return -ENOMEM;
	}

	pdata = edac_dev->pvt_info;
	pdata->name = "mpc85xx_l2_err";
	pdata->irq = NO_IRQ;
	edac_dev->dev = &op->dev;
	dev_set_drvdata(edac_dev->dev, edac_dev);
	edac_dev->ctl_name = pdata->name;
	edac_dev->dev_name = pdata->name;

	res = of_address_to_resource(op->dev.of_node, 0, &r);
	if (res) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "L2 err regs\n", __func__);
		goto err;
	}

	/* we only need the error registers */
	r.start += 0xe00;

	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->l2_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
	if (!pdata->l2_vbase) {
		printk(KERN_ERR "%s: Unable to setup L2 err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, ~0);

	orig_l2_err_disable = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS);

	/* clear the err_dis */
	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, 0);

	edac_dev->mod_name = EDAC_MOD_STR;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		edac_dev->edac_check = mpc85xx_l2_check;

	mpc85xx_set_l2_sysfs_attributes(edac_dev);

	pdata->edac_idx = edac_dev_idx++;

	if (edac_device_add_device(edac_dev) > 0) {
		edac_dbg(3, "failed edac_device_add_device()\n");
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
		res = devm_request_irq(&op->dev, pdata->irq,
				       mpc85xx_l2_isr, IRQF_SHARED,
				       "[EDAC] L2 err", edac_dev);
		if (res < 0) {
			printk(KERN_ERR
			       "%s: Unable to request irq %d for "
			       "MPC85xx L2 err\n", __func__, pdata->irq);
			irq_dispose_mapping(pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for L2 Err\n",
		       pdata->irq);

		edac_dev->op_state = OP_RUNNING_INTERRUPT;

		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, L2_EIE_MASK);
	}

	devres_remove_group(&op->dev, mpc85xx_l2_err_probe);

	edac_dbg(3, "success\n");
	printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n");

	return 0;

err2:
	edac_device_del_device(&op->dev);
err:
	devres_release_group(&op->dev, mpc85xx_l2_err_probe);
	edac_device_free_ctl_info(edac_dev);
	return res;
}

static int mpc85xx_l2_err_remove(struct platform_device *op)
{
	struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;

	edac_dbg(0, "\n");

	if (edac_op_state == EDAC_OPSTATE_INT) {
		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0);
		irq_dispose_mapping(pdata->irq);
	}

	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, orig_l2_err_disable);
	edac_device_del_device(&op->dev);
	edac_device_free_ctl_info(edac_dev);
	return 0;
}

static const struct of_device_id mpc85xx_l2_err_of_match[] = {
	/* deprecate the fsl,85.. forms in the future, 2.6.30? */
	{ .compatible = "fsl,8540-l2-cache-controller", },
	{ .compatible = "fsl,8541-l2-cache-controller", },
	{ .compatible = "fsl,8544-l2-cache-controller", },
	{ .compatible = "fsl,8548-l2-cache-controller", },
	{ .compatible = "fsl,8555-l2-cache-controller", },
	{ .compatible = "fsl,8568-l2-cache-controller", },
	{ .compatible = "fsl,mpc8536-l2-cache-controller", },
	{ .compatible = "fsl,mpc8540-l2-cache-controller", },
	{ .compatible = "fsl,mpc8541-l2-cache-controller", },
	{ .compatible = "fsl,mpc8544-l2-cache-controller", },
	{ .compatible = "fsl,mpc8548-l2-cache-controller", },
	{ .compatible = "fsl,mpc8555-l2-cache-controller", },
	{ .compatible = "fsl,mpc8560-l2-cache-controller", },
	{ .compatible = "fsl,mpc8568-l2-cache-controller", },
	{ .compatible = "fsl,mpc8569-l2-cache-controller", },
	{ .compatible = "fsl,mpc8572-l2-cache-controller", },
	{ .compatible = "fsl,p1020-l2-cache-controller", },
	{ .compatible = "fsl,p1021-l2-cache-controller", },
	{ .compatible = "fsl,p2020-l2-cache-controller", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);

static struct platform_driver mpc85xx_l2_err_driver = {
	.probe = mpc85xx_l2_err_probe,
	.remove = mpc85xx_l2_err_remove,
	.driver = {
		.name = "mpc85xx_l2_err",
		.of_match_table = mpc85xx_l2_err_of_match,
	},
};

/**************************** MC Err device ***************************/

/*
 * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
 * MPC8572 User's Manual.  Each line represents a syndrome bit column as a
 * 64-bit value, but split into an upper and lower 32-bit chunk.  The labels
 * below correspond to Freescale's manuals.
 */
static unsigned int ecc_table[16] = {
	/* MSB           LSB */
	/* [0:31]    [32:63] */
	0xf00fe11e, 0xc33c0ff7,	/* Syndrome bit 7 */
	0x00ff00ff, 0x00fff0ff,
	0x0f0f0f0f, 0x0f0fff00,
	0x11113333, 0x7777000f,
	0x22224444, 0x8888222f,
	0x44448888, 0xffff4441,
	0x8888ffff, 0x11118882,
	0xffff1111, 0x22221114,	/* Syndrome bit 0 */
};

/*
 * Calculate the correct ECC value for a 64-bit value specified by high:low
 */
static u8 calculate_ecc(u32 high, u32 low)
{
	u32 mask_low;
	u32 mask_high;
	int bit_cnt;
	u8 ecc = 0;
	int i;
	int j;

	for (i = 0; i < 8; i++) {
		mask_high = ecc_table[i * 2];
		mask_low = ecc_table[i * 2 + 1];
		bit_cnt = 0;

		for (j = 0; j < 32; j++) {
			if ((mask_high >> j) & 1)
				bit_cnt ^= (high >> j) & 1;
			if ((mask_low >> j) & 1)
				bit_cnt ^= (low >> j) & 1;
		}

		ecc |= bit_cnt << i;
	}

	return ecc;
}

/*
 * Create the syndrome code which is generated if the data line specified by
 * 'bit' failed.  E.g. generate the 8-bit codes seen in Table 8-55 in the
 * MPC8641 User's Manual and 9-61 in the MPC8572 User's Manual.
 */
static u8 syndrome_from_bit(unsigned int bit)
{
	int i;
	u8 syndrome = 0;

	/*
	 * Cycle through the upper or lower 32-bit portion of each value in
	 * ecc_table depending on if 'bit' is in the upper or lower half of
	 * 64-bit data.
	 */
	for (i = bit < 32; i < 16; i += 2)
		syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);

	return syndrome;
}

/*
 * Decode data and ecc syndrome to determine what went wrong
 * Note: This can only decode single-bit errors
 */
static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
			   int *bad_data_bit, int *bad_ecc_bit)
{
	int i;
	u8 syndrome;

	*bad_data_bit = -1;
	*bad_ecc_bit = -1;

	/*
	 * Calculate the ECC of the captured data and XOR it with the captured
	 * ECC to find an ECC syndrome value we can search for
	 */
	syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;

	/* Check if a data line is stuck... */
	for (i = 0; i < 64; i++) {
		if (syndrome == syndrome_from_bit(i)) {
			*bad_data_bit = i;
			return;
		}
	}

	/* If data is correct, check ECC bits for errors... */
	for (i = 0; i < 8; i++) {
		if ((syndrome >> i) & 0x1) {
			*bad_ecc_bit = i;
			return;
		}
	}
}

#define make64(high, low) (((u64)(high) << 32) | (low))
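
/*
 * Illustrative example (derived from ecc_table above, not quoted from the
 * manuals): a single flip of data bit 0 -- the LSB of the captured low
 * word -- changes the check bits computed by calculate_ecc() in exactly the
 * positions given by the LSB column of the odd ecc_table entries, so the
 * resulting syndrome equals syndrome_from_bit(0) == 0x3b.  sbe_ecc_decode()
 * therefore XORs the recomputed ECC with the captured ECC and searches the
 * 64 data-bit columns first; only if no column matches does it fall back to
 * interpreting the syndrome as a single failing check bit.
 */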

static void mpc85xx_mc_check(struct mem_ctl_info *mci)
{
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	struct csrow_info *csrow;
	u32 bus_width;
	u32 err_detect;
	u32 syndrome;
	u64 err_addr;
	u32 pfn;
	int row_index;
	u32 cap_high;
	u32 cap_low;
	int bad_data_bit;
	int bad_ecc_bit;

	err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
	if (!err_detect)
		return;

	mpc85xx_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
			  err_detect);

	/* no more processing if not ECC bit errors */
	if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
		return;
	}

	syndrome = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ECC);

	/* Mask off appropriate bits of syndrome based on bus width */
	bus_width = (in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG) &
		     DSC_DBW_MASK) ? 32 : 64;
	if (bus_width == 64)
		syndrome &= 0xff;
	else
		syndrome &= 0xffff;

	err_addr = make64(
		in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_EXT_ADDRESS),
		in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ADDRESS));
	pfn = err_addr >> PAGE_SHIFT;

	for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
		csrow = mci->csrows[row_index];
		if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
			break;
	}

	cap_high = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_HI);
	cap_low = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_LO);

	/*
	 * Analyze single-bit errors on 64-bit wide buses
	 * TODO: Add support for 32-bit wide buses
	 */
	if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
		sbe_ecc_decode(cap_high, cap_low, syndrome,
			       &bad_data_bit, &bad_ecc_bit);

		if (bad_data_bit != -1)
			mpc85xx_mc_printk(mci, KERN_ERR,
					  "Faulty Data bit: %d\n", bad_data_bit);
		if (bad_ecc_bit != -1)
			mpc85xx_mc_printk(mci, KERN_ERR,
					  "Faulty ECC bit: %d\n", bad_ecc_bit);

		mpc85xx_mc_printk(mci, KERN_ERR,
				  "Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
				  cap_high ^ (1 << (bad_data_bit - 32)),
				  cap_low ^ (1 << bad_data_bit),
				  syndrome ^ (1 << bad_ecc_bit));
	}

	mpc85xx_mc_printk(mci, KERN_ERR,
			  "Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
			  cap_high, cap_low, syndrome);
	mpc85xx_mc_printk(mci, KERN_ERR, "Err addr: %#8.8llx\n", err_addr);
	mpc85xx_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);

	/* we are out of range */
	if (row_index == mci->nr_csrows)
		mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");

	if (err_detect & DDR_EDE_SBE)
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     pfn, err_addr & ~PAGE_MASK, syndrome,
				     row_index, 0, -1,
				     mci->ctl_name, "");

	if (err_detect & DDR_EDE_MBE)
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     pfn, err_addr & ~PAGE_MASK, syndrome,
				     row_index, 0, -1,
				     mci->ctl_name, "");

	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
}

static irqreturn_t mpc85xx_mc_isr(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
	if (!err_detect)
		return IRQ_NONE;

	mpc85xx_mc_check(mci);

	return IRQ_HANDLED;
}

static void mpc85xx_init_csrows(struct mem_ctl_info *mci)
{
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	u32 sdram_ctl;
	u32 sdtype;
	enum mem_type mtype;
	u32 cs_bnds;
	int index;

	sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);

	sdtype = sdram_ctl & DSC_SDTYPE_MASK;
	if (sdram_ctl & DSC_RD_EN) {
		switch (sdtype) {
		case DSC_SDTYPE_DDR:
			mtype = MEM_RDDR;
			break;
		case DSC_SDTYPE_DDR2:
			mtype = MEM_RDDR2;
			break;
		case DSC_SDTYPE_DDR3:
			mtype = MEM_RDDR3;
			break;
		default:
			mtype = MEM_UNKNOWN;
			break;
		}
	} else {
		switch (sdtype) {
		case DSC_SDTYPE_DDR:
			mtype = MEM_DDR;
			break;
		case DSC_SDTYPE_DDR2:
			mtype = MEM_DDR2;
			break;
		case DSC_SDTYPE_DDR3:
			mtype = MEM_DDR3;
			break;
		default:
			mtype = MEM_UNKNOWN;
			break;
		}
	}
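
	/*
	 * Note (derived from the shifts below, not quoted from the manual):
	 * each CS_BNDS register packs a 16-bit starting and a 16-bit ending
	 * address field, both expressed in 16 MiB (2^24 byte) units, which is
	 * why the values are shifted by (24 - PAGE_SHIFT) to convert them
	 * into page frame numbers for the csrow first_page/last_page bounds.
	 */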

	for (index = 0; index < mci->nr_csrows; index++) {
		u32 start;
		u32 end;

		csrow = mci->csrows[index];
		dimm = csrow->channels[0]->dimm;

		cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
				  (index * MPC85XX_MC_CS_BNDS_OFS));

		start = (cs_bnds & 0xffff0000) >> 16;
		end = (cs_bnds & 0x0000ffff);

		if (start == end)
			continue;	/* not populated */

		start <<= (24 - PAGE_SHIFT);
		end <<= (24 - PAGE_SHIFT);
		end |= (1 << (24 - PAGE_SHIFT)) - 1;

		csrow->first_page = start;
		csrow->last_page = end;

		dimm->nr_pages = end + 1 - start;
		dimm->grain = 8;
		dimm->mtype = mtype;
		dimm->dtype = DEV_UNKNOWN;
		if (sdram_ctl & DSC_X32_EN)
			dimm->dtype = DEV_X32;
		dimm->edac_mode = EDAC_SECDED;
	}
}

static int mpc85xx_mc_err_probe(struct platform_device *op)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct mpc85xx_mc_pdata *pdata;
	struct resource r;
	u32 sdram_ctl;
	int res;

	if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
		return -ENOMEM;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 4;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = 1;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
			    sizeof(*pdata));
	if (!mci) {
		devres_release_group(&op->dev, mpc85xx_mc_err_probe);
		return -ENOMEM;
	}

	pdata = mci->pvt_info;
	pdata->name = "mpc85xx_mc_err";
	pdata->irq = NO_IRQ;
	mci->pdev = &op->dev;
	pdata->edac_idx = edac_mc_idx++;
	dev_set_drvdata(mci->pdev, mci);
	mci->ctl_name = pdata->name;
	mci->dev_name = pdata->name;

	res = of_address_to_resource(op->dev.of_node, 0, &r);
	if (res) {
		printk(KERN_ERR "%s: Unable to get resource for MC err regs\n",
		       __func__);
		goto err;
	}

	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
	if (!pdata->mc_vbase) {
		printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
	if (!(sdram_ctl & DSC_ECC_EN)) {
		/* no ECC */
		printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
		res = -ENODEV;
		goto err;
	}

	edac_dbg(3, "init mci\n");
	mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 |
	    MEM_FLAG_DDR | MEM_FLAG_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = MPC85XX_REVISION;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		mci->edac_check = mpc85xx_mc_check;

	mci->ctl_page_to_phys = NULL;

	mci->scrub_mode = SCRUB_SW_SRC;

	mpc85xx_init_csrows(mci);

	/* store the original error disable bits */
	orig_ddr_err_disable =
	    in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE, 0);

	/* clear all error bits */
	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0);

	if (edac_mc_add_mc_with_groups(mci, mpc85xx_dev_groups)) {
		edac_dbg(3, "failed edac_mc_add_mc()\n");
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN,
			 DDR_EIE_MBEE | DDR_EIE_SBEE);

		/* store the original error management threshold */
		orig_ddr_err_sbe = in_be32(pdata->mc_vbase +
					   MPC85XX_MC_ERR_SBE) & 0xff0000;

		/* set threshold to 1 error per interrupt */
		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, 0x10000);

		/* register interrupts */
		pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
		res = devm_request_irq(&op->dev, pdata->irq,
				       mpc85xx_mc_isr,
				       IRQF_SHARED,
				       "[EDAC] MC err", mci);
		if (res < 0) {
			printk(KERN_ERR "%s: Unable to request irq %d for "
			       "MPC85xx DRAM ERR\n", __func__, pdata->irq);
			irq_dispose_mapping(pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC\n",
		       pdata->irq);
	}

	devres_remove_group(&op->dev, mpc85xx_mc_err_probe);
	edac_dbg(3, "success\n");
	printk(KERN_INFO EDAC_MOD_STR " MC err registered\n");

	return 0;

err2:
	edac_mc_del_mc(&op->dev);
err:
	devres_release_group(&op->dev, mpc85xx_mc_err_probe);
	edac_mc_free(mci);
	return res;
}

static int mpc85xx_mc_err_remove(struct platform_device *op)
{
	struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;

	edac_dbg(0, "\n");

	if (edac_op_state == EDAC_OPSTATE_INT) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0);
		irq_dispose_mapping(pdata->irq);
	}

	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE,
		 orig_ddr_err_disable);
	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe);

	edac_mc_del_mc(&op->dev);
	edac_mc_free(mci);
	return 0;
}

static const struct of_device_id mpc85xx_mc_err_of_match[] = {
	/* deprecate the fsl,85.. forms in the future, 2.6.30? */
	{ .compatible = "fsl,8540-memory-controller", },
	{ .compatible = "fsl,8541-memory-controller", },
	{ .compatible = "fsl,8544-memory-controller", },
	{ .compatible = "fsl,8548-memory-controller", },
	{ .compatible = "fsl,8555-memory-controller", },
	{ .compatible = "fsl,8568-memory-controller", },
	{ .compatible = "fsl,mpc8536-memory-controller", },
	{ .compatible = "fsl,mpc8540-memory-controller", },
	{ .compatible = "fsl,mpc8541-memory-controller", },
	{ .compatible = "fsl,mpc8544-memory-controller", },
	{ .compatible = "fsl,mpc8548-memory-controller", },
	{ .compatible = "fsl,mpc8555-memory-controller", },
	{ .compatible = "fsl,mpc8560-memory-controller", },
	{ .compatible = "fsl,mpc8568-memory-controller", },
	{ .compatible = "fsl,mpc8569-memory-controller", },
	{ .compatible = "fsl,mpc8572-memory-controller", },
	{ .compatible = "fsl,mpc8349-memory-controller", },
	{ .compatible = "fsl,p1020-memory-controller", },
	{ .compatible = "fsl,p1021-memory-controller", },
	{ .compatible = "fsl,p2020-memory-controller", },
	{ .compatible = "fsl,qoriq-memory-controller", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);

static struct platform_driver mpc85xx_mc_err_driver = {
	.probe = mpc85xx_mc_err_probe,
	.remove = mpc85xx_mc_err_remove,
	.driver = {
		.name = "mpc85xx_mc_err",
		.of_match_table = mpc85xx_mc_err_of_match,
	},
};

#ifdef CONFIG_FSL_SOC_BOOKE
static void __init mpc85xx_mc_clear_rfxe(void *data)
{
	orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1);
	mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~HID1_RFXE));
}
#endif

static struct platform_driver * const drivers[] = {
	&mpc85xx_mc_err_driver,
	&mpc85xx_l2_err_driver,
#ifdef CONFIG_PCI
	&mpc85xx_pci_err_driver,
#endif
};

static int __init mpc85xx_mc_init(void)
{
	int res = 0;
	u32 __maybe_unused pvr = 0;

	printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, "
	       "(C) 2006 Montavista Software\n");

	/* make sure error reporting method is sane */
	switch (edac_op_state) {
	case EDAC_OPSTATE_POLL:
	case EDAC_OPSTATE_INT:
		break;
	default:
		edac_op_state = EDAC_OPSTATE_INT;
		break;
	}

	res = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (res)
		printk(KERN_WARNING EDAC_MOD_STR " drivers failed to register\n");

#ifdef CONFIG_FSL_SOC_BOOKE
	pvr = mfspr(SPRN_PVR);

	if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
	    (PVR_VER(pvr) == PVR_VER_E500V2)) {
		/*
		 * need to clear HID1[RFXE] to disable machine check int
		 * so we can catch it
		 */
		if (edac_op_state == EDAC_OPSTATE_INT)
			on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0);
	}
#endif

	return 0;
}

module_init(mpc85xx_mc_init);

#ifdef CONFIG_FSL_SOC_BOOKE
static void __exit mpc85xx_mc_restore_hid1(void *data)
{
	mtspr(SPRN_HID1, orig_hid1[smp_processor_id()]);
}
#endif

static void __exit mpc85xx_mc_exit(void)
{
#ifdef CONFIG_FSL_SOC_BOOKE
	u32 pvr = mfspr(SPRN_PVR);

	if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
	    (PVR_VER(pvr) == PVR_VER_E500V2)) {
		on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0);
	}
#endif
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}

module_exit(mpc85xx_mc_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Montavista Software, Inc.");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state,
		 "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
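
/*
 * Usage sketch (assumes the driver is built as the mpc85xx_edac module;
 * adjust the name to match your kernel configuration): the reporting mode
 * can be chosen at load time, e.g.
 *
 *   modprobe mpc85xx_edac edac_op_state=2   # interrupt driven
 *   modprobe mpc85xx_edac edac_op_state=0   # polled
 *
 * or, when the driver is built in, via the module-name-prefixed parameter
 * on the kernel command line.
 */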