1 /* 2 * edac_mc kernel module 3 * (C) 2005 Linux Networx (http://lnxi.com) 4 * This file may be distributed under the terms of the 5 * GNU General Public License. 6 * 7 * Written by Thayne Harbaugh 8 * Based on work by Dan Hollis <goemon at anime dot net> and others. 9 * http://www.anime.net/~goemon/linux-ecc/ 10 * 11 * Modified by Dave Peterson and Doug Thompson 12 * 13 */ 14 15 #include <linux/module.h> 16 #include <linux/proc_fs.h> 17 #include <linux/kernel.h> 18 #include <linux/types.h> 19 #include <linux/smp.h> 20 #include <linux/init.h> 21 #include <linux/sysctl.h> 22 #include <linux/highmem.h> 23 #include <linux/timer.h> 24 #include <linux/slab.h> 25 #include <linux/jiffies.h> 26 #include <linux/spinlock.h> 27 #include <linux/list.h> 28 #include <linux/sysdev.h> 29 #include <linux/ctype.h> 30 #include <linux/kthread.h> 31 #include <asm/uaccess.h> 32 #include <asm/page.h> 33 #include <asm/edac.h> 34 #include "edac_mc.h" 35 36 #define EDAC_MC_VERSION "Ver: 2.0.0 " __DATE__ 37 38 /* For now, disable the EDAC sysfs code. The sysfs interface that EDAC 39 * presents to user space needs more thought, and is likely to change 40 * substantially. 41 */ 42 #define DISABLE_EDAC_SYSFS 43 44 #ifdef CONFIG_EDAC_DEBUG 45 /* Values of 0 to 4 will generate output */ 46 int edac_debug_level = 1; 47 EXPORT_SYMBOL_GPL(edac_debug_level); 48 #endif 49 50 /* EDAC Controls, setable by module parameter, and sysfs */ 51 static int log_ue = 1; 52 static int log_ce = 1; 53 static int panic_on_ue; 54 static int poll_msec = 1000; 55 56 /* lock to memory controller's control array */ 57 static DECLARE_MUTEX(mem_ctls_mutex); 58 static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices); 59 60 static struct task_struct *edac_thread; 61 62 #ifdef CONFIG_PCI 63 static int check_pci_parity = 0; /* default YES check PCI parity */ 64 static int panic_on_pci_parity; /* default no panic on PCI Parity */ 65 static atomic_t pci_parity_count = ATOMIC_INIT(0); 66 67 /* Structure of the whitelist and blacklist arrays */ 68 struct edac_pci_device_list { 69 unsigned int vendor; /* Vendor ID */ 70 unsigned int device; /* Deviice ID */ 71 }; 72 73 #define MAX_LISTED_PCI_DEVICES 32 74 75 /* List of PCI devices (vendor-id:device-id) that should be skipped */ 76 static struct edac_pci_device_list pci_blacklist[MAX_LISTED_PCI_DEVICES]; 77 static int pci_blacklist_count; 78 79 /* List of PCI devices (vendor-id:device-id) that should be scanned */ 80 static struct edac_pci_device_list pci_whitelist[MAX_LISTED_PCI_DEVICES]; 81 static int pci_whitelist_count ; 82 83 #ifndef DISABLE_EDAC_SYSFS 84 static struct kobject edac_pci_kobj; /* /sys/devices/system/edac/pci */ 85 static struct completion edac_pci_kobj_complete; 86 #endif /* DISABLE_EDAC_SYSFS */ 87 #endif /* CONFIG_PCI */ 88 89 /* START sysfs data and methods */ 90 91 #ifndef DISABLE_EDAC_SYSFS 92 93 static const char *mem_types[] = { 94 [MEM_EMPTY] = "Empty", 95 [MEM_RESERVED] = "Reserved", 96 [MEM_UNKNOWN] = "Unknown", 97 [MEM_FPM] = "FPM", 98 [MEM_EDO] = "EDO", 99 [MEM_BEDO] = "BEDO", 100 [MEM_SDR] = "Unbuffered-SDR", 101 [MEM_RDR] = "Registered-SDR", 102 [MEM_DDR] = "Unbuffered-DDR", 103 [MEM_RDDR] = "Registered-DDR", 104 [MEM_RMBS] = "RMBS" 105 }; 106 107 static const char *dev_types[] = { 108 [DEV_UNKNOWN] = "Unknown", 109 [DEV_X1] = "x1", 110 [DEV_X2] = "x2", 111 [DEV_X4] = "x4", 112 [DEV_X8] = "x8", 113 [DEV_X16] = "x16", 114 [DEV_X32] = "x32", 115 [DEV_X64] = "x64" 116 }; 117 118 static const char *edac_caps[] = { 119 [EDAC_UNKNOWN] = "Unknown", 120 [EDAC_NONE] = "None", 121 [EDAC_RESERVED] = "Reserved", 122 [EDAC_PARITY] = "PARITY", 123 [EDAC_EC] = "EC", 124 [EDAC_SECDED] = "SECDED", 125 [EDAC_S2ECD2ED] = "S2ECD2ED", 126 [EDAC_S4ECD4ED] = "S4ECD4ED", 127 [EDAC_S8ECD8ED] = "S8ECD8ED", 128 [EDAC_S16ECD16ED] = "S16ECD16ED" 129 }; 130 131 /* sysfs object: /sys/devices/system/edac */ 132 static struct sysdev_class edac_class = { 133 set_kset_name("edac"), 134 }; 135 136 /* sysfs object: 137 * /sys/devices/system/edac/mc 138 */ 139 static struct kobject edac_memctrl_kobj; 140 141 /* We use these to wait for the reference counts on edac_memctrl_kobj and 142 * edac_pci_kobj to reach 0. 143 */ 144 static struct completion edac_memctrl_kobj_complete; 145 146 /* 147 * /sys/devices/system/edac/mc; 148 * data structures and methods 149 */ 150 #if 0 151 static ssize_t memctrl_string_show(void *ptr, char *buffer) 152 { 153 char *value = (char*) ptr; 154 return sprintf(buffer, "%s\n", value); 155 } 156 #endif 157 158 static ssize_t memctrl_int_show(void *ptr, char *buffer) 159 { 160 int *value = (int*) ptr; 161 return sprintf(buffer, "%d\n", *value); 162 } 163 164 static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count) 165 { 166 int *value = (int*) ptr; 167 168 if (isdigit(*buffer)) 169 *value = simple_strtoul(buffer, NULL, 0); 170 171 return count; 172 } 173 174 struct memctrl_dev_attribute { 175 struct attribute attr; 176 void *value; 177 ssize_t (*show)(void *,char *); 178 ssize_t (*store)(void *, const char *, size_t); 179 }; 180 181 /* Set of show/store abstract level functions for memory control object */ 182 static ssize_t memctrl_dev_show(struct kobject *kobj, 183 struct attribute *attr, char *buffer) 184 { 185 struct memctrl_dev_attribute *memctrl_dev; 186 memctrl_dev = (struct memctrl_dev_attribute*)attr; 187 188 if (memctrl_dev->show) 189 return memctrl_dev->show(memctrl_dev->value, buffer); 190 191 return -EIO; 192 } 193 194 static ssize_t memctrl_dev_store(struct kobject *kobj, struct attribute *attr, 195 const char *buffer, size_t count) 196 { 197 struct memctrl_dev_attribute *memctrl_dev; 198 memctrl_dev = (struct memctrl_dev_attribute*)attr; 199 200 if (memctrl_dev->store) 201 return memctrl_dev->store(memctrl_dev->value, buffer, count); 202 203 return -EIO; 204 } 205 206 static struct sysfs_ops memctrlfs_ops = { 207 .show = memctrl_dev_show, 208 .store = memctrl_dev_store 209 }; 210 211 #define MEMCTRL_ATTR(_name,_mode,_show,_store) \ 212 struct memctrl_dev_attribute attr_##_name = { \ 213 .attr = {.name = __stringify(_name), .mode = _mode }, \ 214 .value = &_name, \ 215 .show = _show, \ 216 .store = _store, \ 217 }; 218 219 #define MEMCTRL_STRING_ATTR(_name,_data,_mode,_show,_store) \ 220 struct memctrl_dev_attribute attr_##_name = { \ 221 .attr = {.name = __stringify(_name), .mode = _mode }, \ 222 .value = _data, \ 223 .show = _show, \ 224 .store = _store, \ 225 }; 226 227 /* cwrow<id> attribute f*/ 228 #if 0 229 MEMCTRL_STRING_ATTR(mc_version,EDAC_MC_VERSION,S_IRUGO,memctrl_string_show,NULL); 230 #endif 231 232 /* csrow<id> control files */ 233 MEMCTRL_ATTR(panic_on_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store); 234 MEMCTRL_ATTR(log_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store); 235 MEMCTRL_ATTR(log_ce,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store); 236 MEMCTRL_ATTR(poll_msec,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store); 237 238 /* Base Attributes of the memory ECC object */ 239 static struct memctrl_dev_attribute *memctrl_attr[] = { 240 &attr_panic_on_ue, 241 &attr_log_ue, 242 &attr_log_ce, 243 &attr_poll_msec, 244 NULL, 245 }; 246 247 /* Main MC kobject release() function */ 248 static void edac_memctrl_master_release(struct kobject *kobj) 249 { 250 debugf1("%s()\n", __func__); 251 complete(&edac_memctrl_kobj_complete); 252 } 253 254 static struct kobj_type ktype_memctrl = { 255 .release = edac_memctrl_master_release, 256 .sysfs_ops = &memctrlfs_ops, 257 .default_attrs = (struct attribute **) memctrl_attr, 258 }; 259 260 #endif /* DISABLE_EDAC_SYSFS */ 261 262 /* Initialize the main sysfs entries for edac: 263 * /sys/devices/system/edac 264 * 265 * and children 266 * 267 * Return: 0 SUCCESS 268 * !0 FAILURE 269 */ 270 static int edac_sysfs_memctrl_setup(void) 271 #ifdef DISABLE_EDAC_SYSFS 272 { 273 return 0; 274 } 275 #else 276 { 277 int err=0; 278 279 debugf1("%s()\n", __func__); 280 281 /* create the /sys/devices/system/edac directory */ 282 err = sysdev_class_register(&edac_class); 283 284 if (!err) { 285 /* Init the MC's kobject */ 286 memset(&edac_memctrl_kobj, 0, sizeof (edac_memctrl_kobj)); 287 edac_memctrl_kobj.parent = &edac_class.kset.kobj; 288 edac_memctrl_kobj.ktype = &ktype_memctrl; 289 290 /* generate sysfs "..../edac/mc" */ 291 err = kobject_set_name(&edac_memctrl_kobj,"mc"); 292 293 if (!err) { 294 /* FIXME: maybe new sysdev_create_subdir() */ 295 err = kobject_register(&edac_memctrl_kobj); 296 297 if (err) 298 debugf1("Failed to register '.../edac/mc'\n"); 299 else 300 debugf1("Registered '.../edac/mc' kobject\n"); 301 } 302 } else 303 debugf1("%s() error=%d\n", __func__, err); 304 305 return err; 306 } 307 #endif /* DISABLE_EDAC_SYSFS */ 308 309 /* 310 * MC teardown: 311 * the '..../edac/mc' kobject followed by '..../edac' itself 312 */ 313 static void edac_sysfs_memctrl_teardown(void) 314 { 315 #ifndef DISABLE_EDAC_SYSFS 316 debugf0("MC: " __FILE__ ": %s()\n", __func__); 317 318 /* Unregister the MC's kobject and wait for reference count to reach 319 * 0. 320 */ 321 init_completion(&edac_memctrl_kobj_complete); 322 kobject_unregister(&edac_memctrl_kobj); 323 wait_for_completion(&edac_memctrl_kobj_complete); 324 325 /* Unregister the 'edac' object */ 326 sysdev_class_unregister(&edac_class); 327 #endif /* DISABLE_EDAC_SYSFS */ 328 } 329 330 #ifdef CONFIG_PCI 331 332 #ifndef DISABLE_EDAC_SYSFS 333 334 /* 335 * /sys/devices/system/edac/pci; 336 * data structures and methods 337 */ 338 339 struct list_control { 340 struct edac_pci_device_list *list; 341 int *count; 342 }; 343 344 #if 0 345 /* Output the list as: vendor_id:device:id<,vendor_id:device_id> */ 346 static ssize_t edac_pci_list_string_show(void *ptr, char *buffer) 347 { 348 struct list_control *listctl; 349 struct edac_pci_device_list *list; 350 char *p = buffer; 351 int len=0; 352 int i; 353 354 listctl = ptr; 355 list = listctl->list; 356 357 for (i = 0; i < *(listctl->count); i++, list++ ) { 358 if (len > 0) 359 len += snprintf(p + len, (PAGE_SIZE-len), ","); 360 361 len += snprintf(p + len, 362 (PAGE_SIZE-len), 363 "%x:%x", 364 list->vendor,list->device); 365 } 366 367 len += snprintf(p + len,(PAGE_SIZE-len), "\n"); 368 return (ssize_t) len; 369 } 370 371 /** 372 * 373 * Scan string from **s to **e looking for one 'vendor:device' tuple 374 * where each field is a hex value 375 * 376 * return 0 if an entry is NOT found 377 * return 1 if an entry is found 378 * fill in *vendor_id and *device_id with values found 379 * 380 * In both cases, make sure *s has been moved forward toward *e 381 */ 382 static int parse_one_device(const char **s,const char **e, 383 unsigned int *vendor_id, unsigned int *device_id) 384 { 385 const char *runner, *p; 386 387 /* if null byte, we are done */ 388 if (!**s) { 389 (*s)++; /* keep *s moving */ 390 return 0; 391 } 392 393 /* skip over newlines & whitespace */ 394 if ((**s == '\n') || isspace(**s)) { 395 (*s)++; 396 return 0; 397 } 398 399 if (!isxdigit(**s)) { 400 (*s)++; 401 return 0; 402 } 403 404 /* parse vendor_id */ 405 runner = *s; 406 407 while (runner < *e) { 408 /* scan for vendor:device delimiter */ 409 if (*runner == ':') { 410 *vendor_id = simple_strtol((char*) *s, (char**) &p, 16); 411 runner = p + 1; 412 break; 413 } 414 415 runner++; 416 } 417 418 if (!isxdigit(*runner)) { 419 *s = ++runner; 420 return 0; 421 } 422 423 /* parse device_id */ 424 if (runner < *e) { 425 *device_id = simple_strtol((char*)runner, (char**)&p, 16); 426 runner = p; 427 } 428 429 *s = runner; 430 return 1; 431 } 432 433 static ssize_t edac_pci_list_string_store(void *ptr, const char *buffer, 434 size_t count) 435 { 436 struct list_control *listctl; 437 struct edac_pci_device_list *list; 438 unsigned int vendor_id, device_id; 439 const char *s, *e; 440 int *index; 441 442 s = (char*)buffer; 443 e = s + count; 444 listctl = ptr; 445 list = listctl->list; 446 index = listctl->count; 447 *index = 0; 448 449 while (*index < MAX_LISTED_PCI_DEVICES) { 450 if (parse_one_device(&s,&e,&vendor_id,&device_id)) { 451 list[ *index ].vendor = vendor_id; 452 list[ *index ].device = device_id; 453 (*index)++; 454 } 455 456 /* check for all data consume */ 457 if (s >= e) 458 break; 459 } 460 461 return count; 462 } 463 464 #endif 465 static ssize_t edac_pci_int_show(void *ptr, char *buffer) 466 { 467 int *value = ptr; 468 return sprintf(buffer,"%d\n",*value); 469 } 470 471 static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count) 472 { 473 int *value = ptr; 474 475 if (isdigit(*buffer)) 476 *value = simple_strtoul(buffer,NULL,0); 477 478 return count; 479 } 480 481 struct edac_pci_dev_attribute { 482 struct attribute attr; 483 void *value; 484 ssize_t (*show)(void *,char *); 485 ssize_t (*store)(void *, const char *,size_t); 486 }; 487 488 /* Set of show/store abstract level functions for PCI Parity object */ 489 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr, 490 char *buffer) 491 { 492 struct edac_pci_dev_attribute *edac_pci_dev; 493 edac_pci_dev= (struct edac_pci_dev_attribute*)attr; 494 495 if (edac_pci_dev->show) 496 return edac_pci_dev->show(edac_pci_dev->value, buffer); 497 return -EIO; 498 } 499 500 static ssize_t edac_pci_dev_store(struct kobject *kobj, 501 struct attribute *attr, const char *buffer, size_t count) 502 { 503 struct edac_pci_dev_attribute *edac_pci_dev; 504 edac_pci_dev= (struct edac_pci_dev_attribute*)attr; 505 506 if (edac_pci_dev->show) 507 return edac_pci_dev->store(edac_pci_dev->value, buffer, count); 508 return -EIO; 509 } 510 511 static struct sysfs_ops edac_pci_sysfs_ops = { 512 .show = edac_pci_dev_show, 513 .store = edac_pci_dev_store 514 }; 515 516 #define EDAC_PCI_ATTR(_name,_mode,_show,_store) \ 517 struct edac_pci_dev_attribute edac_pci_attr_##_name = { \ 518 .attr = {.name = __stringify(_name), .mode = _mode }, \ 519 .value = &_name, \ 520 .show = _show, \ 521 .store = _store, \ 522 }; 523 524 #define EDAC_PCI_STRING_ATTR(_name,_data,_mode,_show,_store) \ 525 struct edac_pci_dev_attribute edac_pci_attr_##_name = { \ 526 .attr = {.name = __stringify(_name), .mode = _mode }, \ 527 .value = _data, \ 528 .show = _show, \ 529 .store = _store, \ 530 }; 531 532 #if 0 533 static struct list_control pci_whitelist_control = { 534 .list = pci_whitelist, 535 .count = &pci_whitelist_count 536 }; 537 538 static struct list_control pci_blacklist_control = { 539 .list = pci_blacklist, 540 .count = &pci_blacklist_count 541 }; 542 543 /* whitelist attribute */ 544 EDAC_PCI_STRING_ATTR(pci_parity_whitelist, 545 &pci_whitelist_control, 546 S_IRUGO|S_IWUSR, 547 edac_pci_list_string_show, 548 edac_pci_list_string_store); 549 550 EDAC_PCI_STRING_ATTR(pci_parity_blacklist, 551 &pci_blacklist_control, 552 S_IRUGO|S_IWUSR, 553 edac_pci_list_string_show, 554 edac_pci_list_string_store); 555 #endif 556 557 /* PCI Parity control files */ 558 EDAC_PCI_ATTR(check_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show, 559 edac_pci_int_store); 560 EDAC_PCI_ATTR(panic_on_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show, 561 edac_pci_int_store); 562 EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, edac_pci_int_show, NULL); 563 564 /* Base Attributes of the memory ECC object */ 565 static struct edac_pci_dev_attribute *edac_pci_attr[] = { 566 &edac_pci_attr_check_pci_parity, 567 &edac_pci_attr_panic_on_pci_parity, 568 &edac_pci_attr_pci_parity_count, 569 NULL, 570 }; 571 572 /* No memory to release */ 573 static void edac_pci_release(struct kobject *kobj) 574 { 575 debugf1("%s()\n", __func__); 576 complete(&edac_pci_kobj_complete); 577 } 578 579 static struct kobj_type ktype_edac_pci = { 580 .release = edac_pci_release, 581 .sysfs_ops = &edac_pci_sysfs_ops, 582 .default_attrs = (struct attribute **) edac_pci_attr, 583 }; 584 585 #endif /* DISABLE_EDAC_SYSFS */ 586 587 /** 588 * edac_sysfs_pci_setup() 589 * 590 */ 591 static int edac_sysfs_pci_setup(void) 592 #ifdef DISABLE_EDAC_SYSFS 593 { 594 return 0; 595 } 596 #else 597 { 598 int err; 599 600 debugf1("%s()\n", __func__); 601 602 memset(&edac_pci_kobj, 0, sizeof(edac_pci_kobj)); 603 edac_pci_kobj.parent = &edac_class.kset.kobj; 604 edac_pci_kobj.ktype = &ktype_edac_pci; 605 err = kobject_set_name(&edac_pci_kobj, "pci"); 606 607 if (!err) { 608 /* Instanstiate the csrow object */ 609 /* FIXME: maybe new sysdev_create_subdir() */ 610 err = kobject_register(&edac_pci_kobj); 611 612 if (err) 613 debugf1("Failed to register '.../edac/pci'\n"); 614 else 615 debugf1("Registered '.../edac/pci' kobject\n"); 616 } 617 618 return err; 619 } 620 #endif /* DISABLE_EDAC_SYSFS */ 621 622 static void edac_sysfs_pci_teardown(void) 623 { 624 #ifndef DISABLE_EDAC_SYSFS 625 debugf0("%s()\n", __func__); 626 init_completion(&edac_pci_kobj_complete); 627 kobject_unregister(&edac_pci_kobj); 628 wait_for_completion(&edac_pci_kobj_complete); 629 #endif 630 } 631 632 633 static u16 get_pci_parity_status(struct pci_dev *dev, int secondary) 634 { 635 int where; 636 u16 status; 637 638 where = secondary ? PCI_SEC_STATUS : PCI_STATUS; 639 pci_read_config_word(dev, where, &status); 640 641 /* If we get back 0xFFFF then we must suspect that the card has been 642 * pulled but the Linux PCI layer has not yet finished cleaning up. 643 * We don't want to report on such devices 644 */ 645 646 if (status == 0xFFFF) { 647 u32 sanity; 648 649 pci_read_config_dword(dev, 0, &sanity); 650 651 if (sanity == 0xFFFFFFFF) 652 return 0; 653 } 654 655 status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR | 656 PCI_STATUS_PARITY; 657 658 if (status) 659 /* reset only the bits we are interested in */ 660 pci_write_config_word(dev, where, status); 661 662 return status; 663 } 664 665 typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev); 666 667 /* Clear any PCI parity errors logged by this device. */ 668 static void edac_pci_dev_parity_clear(struct pci_dev *dev) 669 { 670 u8 header_type; 671 672 get_pci_parity_status(dev, 0); 673 674 /* read the device TYPE, looking for bridges */ 675 pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type); 676 677 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) 678 get_pci_parity_status(dev, 1); 679 } 680 681 /* 682 * PCI Parity polling 683 * 684 */ 685 static void edac_pci_dev_parity_test(struct pci_dev *dev) 686 { 687 u16 status; 688 u8 header_type; 689 690 /* read the STATUS register on this device 691 */ 692 status = get_pci_parity_status(dev, 0); 693 694 debugf2("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id ); 695 696 /* check the status reg for errors */ 697 if (status) { 698 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) 699 edac_printk(KERN_CRIT, EDAC_PCI, 700 "Signaled System Error on %s\n", 701 pci_name(dev)); 702 703 if (status & (PCI_STATUS_PARITY)) { 704 edac_printk(KERN_CRIT, EDAC_PCI, 705 "Master Data Parity Error on %s\n", 706 pci_name(dev)); 707 708 atomic_inc(&pci_parity_count); 709 } 710 711 if (status & (PCI_STATUS_DETECTED_PARITY)) { 712 edac_printk(KERN_CRIT, EDAC_PCI, 713 "Detected Parity Error on %s\n", 714 pci_name(dev)); 715 716 atomic_inc(&pci_parity_count); 717 } 718 } 719 720 /* read the device TYPE, looking for bridges */ 721 pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type); 722 723 debugf2("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev->dev.bus_id ); 724 725 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { 726 /* On bridges, need to examine secondary status register */ 727 status = get_pci_parity_status(dev, 1); 728 729 debugf2("PCI SEC_STATUS= 0x%04x %s\n", 730 status, dev->dev.bus_id ); 731 732 /* check the secondary status reg for errors */ 733 if (status) { 734 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) 735 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge " 736 "Signaled System Error on %s\n", 737 pci_name(dev)); 738 739 if (status & (PCI_STATUS_PARITY)) { 740 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge " 741 "Master Data Parity Error on " 742 "%s\n", pci_name(dev)); 743 744 atomic_inc(&pci_parity_count); 745 } 746 747 if (status & (PCI_STATUS_DETECTED_PARITY)) { 748 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge " 749 "Detected Parity Error on %s\n", 750 pci_name(dev)); 751 752 atomic_inc(&pci_parity_count); 753 } 754 } 755 } 756 } 757 758 /* 759 * check_dev_on_list: Scan for a PCI device on a white/black list 760 * @list: an EDAC &edac_pci_device_list white/black list pointer 761 * @free_index: index of next free entry on the list 762 * @pci_dev: PCI Device pointer 763 * 764 * see if list contains the device. 765 * 766 * Returns: 0 not found 767 * 1 found on list 768 */ 769 static int check_dev_on_list(struct edac_pci_device_list *list, 770 int free_index, struct pci_dev *dev) 771 { 772 int i; 773 int rc = 0; /* Assume not found */ 774 unsigned short vendor=dev->vendor; 775 unsigned short device=dev->device; 776 777 /* Scan the list, looking for a vendor/device match */ 778 for (i = 0; i < free_index; i++, list++ ) { 779 if ((list->vendor == vendor ) && (list->device == device )) { 780 rc = 1; 781 break; 782 } 783 } 784 785 return rc; 786 } 787 788 /* 789 * pci_dev parity list iterator 790 * Scan the PCI device list for one iteration, looking for SERRORs 791 * Master Parity ERRORS or Parity ERRORs on primary or secondary devices 792 */ 793 static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn) 794 { 795 struct pci_dev *dev = NULL; 796 797 /* request for kernel access to the next PCI device, if any, 798 * and while we are looking at it have its reference count 799 * bumped until we are done with it 800 */ 801 while((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 802 /* if whitelist exists then it has priority, so only scan 803 * those devices on the whitelist 804 */ 805 if (pci_whitelist_count > 0 ) { 806 if (check_dev_on_list(pci_whitelist, 807 pci_whitelist_count, dev)) 808 fn(dev); 809 } else { 810 /* 811 * if no whitelist, then check if this devices is 812 * blacklisted 813 */ 814 if (!check_dev_on_list(pci_blacklist, 815 pci_blacklist_count, dev)) 816 fn(dev); 817 } 818 } 819 } 820 821 static void do_pci_parity_check(void) 822 { 823 unsigned long flags; 824 int before_count; 825 826 debugf3("%s()\n", __func__); 827 828 if (!check_pci_parity) 829 return; 830 831 before_count = atomic_read(&pci_parity_count); 832 833 /* scan all PCI devices looking for a Parity Error on devices and 834 * bridges 835 */ 836 local_irq_save(flags); 837 edac_pci_dev_parity_iterator(edac_pci_dev_parity_test); 838 local_irq_restore(flags); 839 840 /* Only if operator has selected panic on PCI Error */ 841 if (panic_on_pci_parity) { 842 /* If the count is different 'after' from 'before' */ 843 if (before_count != atomic_read(&pci_parity_count)) 844 panic("EDAC: PCI Parity Error"); 845 } 846 } 847 848 static inline void clear_pci_parity_errors(void) 849 { 850 /* Clear any PCI bus parity errors that devices initially have logged 851 * in their registers. 852 */ 853 edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear); 854 } 855 856 #else /* CONFIG_PCI */ 857 858 static inline void do_pci_parity_check(void) 859 { 860 /* no-op */ 861 } 862 863 static inline void clear_pci_parity_errors(void) 864 { 865 /* no-op */ 866 } 867 868 static void edac_sysfs_pci_teardown(void) 869 { 870 } 871 872 static int edac_sysfs_pci_setup(void) 873 { 874 return 0; 875 } 876 #endif /* CONFIG_PCI */ 877 878 #ifndef DISABLE_EDAC_SYSFS 879 880 /* EDAC sysfs CSROW data structures and methods */ 881 882 /* Set of more detailed csrow<id> attribute show/store functions */ 883 static ssize_t csrow_ch0_dimm_label_show(struct csrow_info *csrow, char *data) 884 { 885 ssize_t size = 0; 886 887 if (csrow->nr_channels > 0) { 888 size = snprintf(data, EDAC_MC_LABEL_LEN,"%s\n", 889 csrow->channels[0].label); 890 } 891 892 return size; 893 } 894 895 static ssize_t csrow_ch1_dimm_label_show(struct csrow_info *csrow, char *data) 896 { 897 ssize_t size = 0; 898 899 if (csrow->nr_channels > 0) { 900 size = snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", 901 csrow->channels[1].label); 902 } 903 904 return size; 905 } 906 907 static ssize_t csrow_ch0_dimm_label_store(struct csrow_info *csrow, 908 const char *data, size_t size) 909 { 910 ssize_t max_size = 0; 911 912 if (csrow->nr_channels > 0) { 913 max_size = min((ssize_t)size,(ssize_t)EDAC_MC_LABEL_LEN-1); 914 strncpy(csrow->channels[0].label, data, max_size); 915 csrow->channels[0].label[max_size] = '\0'; 916 } 917 918 return size; 919 } 920 921 static ssize_t csrow_ch1_dimm_label_store(struct csrow_info *csrow, 922 const char *data, size_t size) 923 { 924 ssize_t max_size = 0; 925 926 if (csrow->nr_channels > 1) { 927 max_size = min((ssize_t)size,(ssize_t)EDAC_MC_LABEL_LEN-1); 928 strncpy(csrow->channels[1].label, data, max_size); 929 csrow->channels[1].label[max_size] = '\0'; 930 } 931 932 return max_size; 933 } 934 935 static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data) 936 { 937 return sprintf(data,"%u\n", csrow->ue_count); 938 } 939 940 static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data) 941 { 942 return sprintf(data,"%u\n", csrow->ce_count); 943 } 944 945 static ssize_t csrow_ch0_ce_count_show(struct csrow_info *csrow, char *data) 946 { 947 ssize_t size = 0; 948 949 if (csrow->nr_channels > 0) { 950 size = sprintf(data,"%u\n", csrow->channels[0].ce_count); 951 } 952 953 return size; 954 } 955 956 static ssize_t csrow_ch1_ce_count_show(struct csrow_info *csrow, char *data) 957 { 958 ssize_t size = 0; 959 960 if (csrow->nr_channels > 1) { 961 size = sprintf(data,"%u\n", csrow->channels[1].ce_count); 962 } 963 964 return size; 965 } 966 967 static ssize_t csrow_size_show(struct csrow_info *csrow, char *data) 968 { 969 return sprintf(data,"%u\n", PAGES_TO_MiB(csrow->nr_pages)); 970 } 971 972 static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data) 973 { 974 return sprintf(data,"%s\n", mem_types[csrow->mtype]); 975 } 976 977 static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data) 978 { 979 return sprintf(data,"%s\n", dev_types[csrow->dtype]); 980 } 981 982 static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data) 983 { 984 return sprintf(data,"%s\n", edac_caps[csrow->edac_mode]); 985 } 986 987 struct csrowdev_attribute { 988 struct attribute attr; 989 ssize_t (*show)(struct csrow_info *,char *); 990 ssize_t (*store)(struct csrow_info *, const char *,size_t); 991 }; 992 993 #define to_csrow(k) container_of(k, struct csrow_info, kobj) 994 #define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr) 995 996 /* Set of show/store higher level functions for csrow objects */ 997 static ssize_t csrowdev_show(struct kobject *kobj, struct attribute *attr, 998 char *buffer) 999 { 1000 struct csrow_info *csrow = to_csrow(kobj); 1001 struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr); 1002 1003 if (csrowdev_attr->show) 1004 return csrowdev_attr->show(csrow, buffer); 1005 1006 return -EIO; 1007 } 1008 1009 static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr, 1010 const char *buffer, size_t count) 1011 { 1012 struct csrow_info *csrow = to_csrow(kobj); 1013 struct csrowdev_attribute * csrowdev_attr = to_csrowdev_attr(attr); 1014 1015 if (csrowdev_attr->store) 1016 return csrowdev_attr->store(csrow, buffer, count); 1017 1018 return -EIO; 1019 } 1020 1021 static struct sysfs_ops csrowfs_ops = { 1022 .show = csrowdev_show, 1023 .store = csrowdev_store 1024 }; 1025 1026 #define CSROWDEV_ATTR(_name,_mode,_show,_store) \ 1027 struct csrowdev_attribute attr_##_name = { \ 1028 .attr = {.name = __stringify(_name), .mode = _mode }, \ 1029 .show = _show, \ 1030 .store = _store, \ 1031 }; 1032 1033 /* cwrow<id>/attribute files */ 1034 CSROWDEV_ATTR(size_mb,S_IRUGO,csrow_size_show,NULL); 1035 CSROWDEV_ATTR(dev_type,S_IRUGO,csrow_dev_type_show,NULL); 1036 CSROWDEV_ATTR(mem_type,S_IRUGO,csrow_mem_type_show,NULL); 1037 CSROWDEV_ATTR(edac_mode,S_IRUGO,csrow_edac_mode_show,NULL); 1038 CSROWDEV_ATTR(ue_count,S_IRUGO,csrow_ue_count_show,NULL); 1039 CSROWDEV_ATTR(ce_count,S_IRUGO,csrow_ce_count_show,NULL); 1040 CSROWDEV_ATTR(ch0_ce_count,S_IRUGO,csrow_ch0_ce_count_show,NULL); 1041 CSROWDEV_ATTR(ch1_ce_count,S_IRUGO,csrow_ch1_ce_count_show,NULL); 1042 1043 /* control/attribute files */ 1044 CSROWDEV_ATTR(ch0_dimm_label,S_IRUGO|S_IWUSR, 1045 csrow_ch0_dimm_label_show, 1046 csrow_ch0_dimm_label_store); 1047 CSROWDEV_ATTR(ch1_dimm_label,S_IRUGO|S_IWUSR, 1048 csrow_ch1_dimm_label_show, 1049 csrow_ch1_dimm_label_store); 1050 1051 /* Attributes of the CSROW<id> object */ 1052 static struct csrowdev_attribute *csrow_attr[] = { 1053 &attr_dev_type, 1054 &attr_mem_type, 1055 &attr_edac_mode, 1056 &attr_size_mb, 1057 &attr_ue_count, 1058 &attr_ce_count, 1059 &attr_ch0_ce_count, 1060 &attr_ch1_ce_count, 1061 &attr_ch0_dimm_label, 1062 &attr_ch1_dimm_label, 1063 NULL, 1064 }; 1065 1066 /* No memory to release */ 1067 static void edac_csrow_instance_release(struct kobject *kobj) 1068 { 1069 struct csrow_info *cs; 1070 1071 debugf1("%s()\n", __func__); 1072 cs = container_of(kobj, struct csrow_info, kobj); 1073 complete(&cs->kobj_complete); 1074 } 1075 1076 static struct kobj_type ktype_csrow = { 1077 .release = edac_csrow_instance_release, 1078 .sysfs_ops = &csrowfs_ops, 1079 .default_attrs = (struct attribute **) csrow_attr, 1080 }; 1081 1082 /* Create a CSROW object under specifed edac_mc_device */ 1083 static int edac_create_csrow_object(struct kobject *edac_mci_kobj, 1084 struct csrow_info *csrow, int index) 1085 { 1086 int err = 0; 1087 1088 debugf0("%s()\n", __func__); 1089 memset(&csrow->kobj, 0, sizeof(csrow->kobj)); 1090 1091 /* generate ..../edac/mc/mc<id>/csrow<index> */ 1092 1093 csrow->kobj.parent = edac_mci_kobj; 1094 csrow->kobj.ktype = &ktype_csrow; 1095 1096 /* name this instance of csrow<id> */ 1097 err = kobject_set_name(&csrow->kobj,"csrow%d",index); 1098 1099 if (!err) { 1100 /* Instanstiate the csrow object */ 1101 err = kobject_register(&csrow->kobj); 1102 1103 if (err) 1104 debugf0("Failed to register CSROW%d\n",index); 1105 else 1106 debugf0("Registered CSROW%d\n",index); 1107 } 1108 1109 return err; 1110 } 1111 1112 /* sysfs data structures and methods for the MCI kobjects */ 1113 1114 static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci, 1115 const char *data, size_t count) 1116 { 1117 int row, chan; 1118 1119 mci->ue_noinfo_count = 0; 1120 mci->ce_noinfo_count = 0; 1121 mci->ue_count = 0; 1122 mci->ce_count = 0; 1123 1124 for (row = 0; row < mci->nr_csrows; row++) { 1125 struct csrow_info *ri = &mci->csrows[row]; 1126 1127 ri->ue_count = 0; 1128 ri->ce_count = 0; 1129 1130 for (chan = 0; chan < ri->nr_channels; chan++) 1131 ri->channels[chan].ce_count = 0; 1132 } 1133 1134 mci->start_time = jiffies; 1135 return count; 1136 } 1137 1138 static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data) 1139 { 1140 return sprintf(data,"%d\n", mci->ue_count); 1141 } 1142 1143 static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data) 1144 { 1145 return sprintf(data,"%d\n", mci->ce_count); 1146 } 1147 1148 static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data) 1149 { 1150 return sprintf(data,"%d\n", mci->ce_noinfo_count); 1151 } 1152 1153 static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data) 1154 { 1155 return sprintf(data,"%d\n", mci->ue_noinfo_count); 1156 } 1157 1158 static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data) 1159 { 1160 return sprintf(data,"%ld\n", (jiffies - mci->start_time) / HZ); 1161 } 1162 1163 static ssize_t mci_mod_name_show(struct mem_ctl_info *mci, char *data) 1164 { 1165 return sprintf(data,"%s %s\n", mci->mod_name, mci->mod_ver); 1166 } 1167 1168 static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data) 1169 { 1170 return sprintf(data,"%s\n", mci->ctl_name); 1171 } 1172 1173 static int mci_output_edac_cap(char *buf, unsigned long edac_cap) 1174 { 1175 char *p = buf; 1176 int bit_idx; 1177 1178 for (bit_idx = 0; bit_idx < 8 * sizeof(edac_cap); bit_idx++) { 1179 if ((edac_cap >> bit_idx) & 0x1) 1180 p += sprintf(p, "%s ", edac_caps[bit_idx]); 1181 } 1182 1183 return p - buf; 1184 } 1185 1186 static ssize_t mci_edac_capability_show(struct mem_ctl_info *mci, char *data) 1187 { 1188 char *p = data; 1189 1190 p += mci_output_edac_cap(p,mci->edac_ctl_cap); 1191 p += sprintf(p, "\n"); 1192 return p - data; 1193 } 1194 1195 static ssize_t mci_edac_current_capability_show(struct mem_ctl_info *mci, 1196 char *data) 1197 { 1198 char *p = data; 1199 1200 p += mci_output_edac_cap(p,mci->edac_cap); 1201 p += sprintf(p, "\n"); 1202 return p - data; 1203 } 1204 1205 static int mci_output_mtype_cap(char *buf, unsigned long mtype_cap) 1206 { 1207 char *p = buf; 1208 int bit_idx; 1209 1210 for (bit_idx = 0; bit_idx < 8 * sizeof(mtype_cap); bit_idx++) { 1211 if ((mtype_cap >> bit_idx) & 0x1) 1212 p += sprintf(p, "%s ", mem_types[bit_idx]); 1213 } 1214 1215 return p - buf; 1216 } 1217 1218 static ssize_t mci_supported_mem_type_show(struct mem_ctl_info *mci, 1219 char *data) 1220 { 1221 char *p = data; 1222 1223 p += mci_output_mtype_cap(p,mci->mtype_cap); 1224 p += sprintf(p, "\n"); 1225 return p - data; 1226 } 1227 1228 static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data) 1229 { 1230 int total_pages, csrow_idx; 1231 1232 for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows; 1233 csrow_idx++) { 1234 struct csrow_info *csrow = &mci->csrows[csrow_idx]; 1235 1236 if (!csrow->nr_pages) 1237 continue; 1238 1239 total_pages += csrow->nr_pages; 1240 } 1241 1242 return sprintf(data,"%u\n", PAGES_TO_MiB(total_pages)); 1243 } 1244 1245 struct mcidev_attribute { 1246 struct attribute attr; 1247 ssize_t (*show)(struct mem_ctl_info *,char *); 1248 ssize_t (*store)(struct mem_ctl_info *, const char *,size_t); 1249 }; 1250 1251 #define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj) 1252 #define to_mcidev_attr(a) container_of(a, struct mcidev_attribute, attr) 1253 1254 static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr, 1255 char *buffer) 1256 { 1257 struct mem_ctl_info *mem_ctl_info = to_mci(kobj); 1258 struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr); 1259 1260 if (mcidev_attr->show) 1261 return mcidev_attr->show(mem_ctl_info, buffer); 1262 1263 return -EIO; 1264 } 1265 1266 static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr, 1267 const char *buffer, size_t count) 1268 { 1269 struct mem_ctl_info *mem_ctl_info = to_mci(kobj); 1270 struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr); 1271 1272 if (mcidev_attr->store) 1273 return mcidev_attr->store(mem_ctl_info, buffer, count); 1274 1275 return -EIO; 1276 } 1277 1278 static struct sysfs_ops mci_ops = { 1279 .show = mcidev_show, 1280 .store = mcidev_store 1281 }; 1282 1283 #define MCIDEV_ATTR(_name,_mode,_show,_store) \ 1284 struct mcidev_attribute mci_attr_##_name = { \ 1285 .attr = {.name = __stringify(_name), .mode = _mode }, \ 1286 .show = _show, \ 1287 .store = _store, \ 1288 }; 1289 1290 /* Control file */ 1291 MCIDEV_ATTR(reset_counters,S_IWUSR,NULL,mci_reset_counters_store); 1292 1293 /* Attribute files */ 1294 MCIDEV_ATTR(mc_name,S_IRUGO,mci_ctl_name_show,NULL); 1295 MCIDEV_ATTR(module_name,S_IRUGO,mci_mod_name_show,NULL); 1296 MCIDEV_ATTR(edac_capability,S_IRUGO,mci_edac_capability_show,NULL); 1297 MCIDEV_ATTR(size_mb,S_IRUGO,mci_size_mb_show,NULL); 1298 MCIDEV_ATTR(seconds_since_reset,S_IRUGO,mci_seconds_show,NULL); 1299 MCIDEV_ATTR(ue_noinfo_count,S_IRUGO,mci_ue_noinfo_show,NULL); 1300 MCIDEV_ATTR(ce_noinfo_count,S_IRUGO,mci_ce_noinfo_show,NULL); 1301 MCIDEV_ATTR(ue_count,S_IRUGO,mci_ue_count_show,NULL); 1302 MCIDEV_ATTR(ce_count,S_IRUGO,mci_ce_count_show,NULL); 1303 MCIDEV_ATTR(edac_current_capability,S_IRUGO, 1304 mci_edac_current_capability_show,NULL); 1305 MCIDEV_ATTR(supported_mem_type,S_IRUGO, 1306 mci_supported_mem_type_show,NULL); 1307 1308 static struct mcidev_attribute *mci_attr[] = { 1309 &mci_attr_reset_counters, 1310 &mci_attr_module_name, 1311 &mci_attr_mc_name, 1312 &mci_attr_edac_capability, 1313 &mci_attr_edac_current_capability, 1314 &mci_attr_supported_mem_type, 1315 &mci_attr_size_mb, 1316 &mci_attr_seconds_since_reset, 1317 &mci_attr_ue_noinfo_count, 1318 &mci_attr_ce_noinfo_count, 1319 &mci_attr_ue_count, 1320 &mci_attr_ce_count, 1321 NULL 1322 }; 1323 1324 /* 1325 * Release of a MC controlling instance 1326 */ 1327 static void edac_mci_instance_release(struct kobject *kobj) 1328 { 1329 struct mem_ctl_info *mci; 1330 1331 mci = to_mci(kobj); 1332 debugf0("%s() idx=%d\n", __func__, mci->mc_idx); 1333 complete(&mci->kobj_complete); 1334 } 1335 1336 static struct kobj_type ktype_mci = { 1337 .release = edac_mci_instance_release, 1338 .sysfs_ops = &mci_ops, 1339 .default_attrs = (struct attribute **) mci_attr, 1340 }; 1341 1342 #endif /* DISABLE_EDAC_SYSFS */ 1343 1344 #define EDAC_DEVICE_SYMLINK "device" 1345 1346 /* 1347 * Create a new Memory Controller kobject instance, 1348 * mc<id> under the 'mc' directory 1349 * 1350 * Return: 1351 * 0 Success 1352 * !0 Failure 1353 */ 1354 static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) 1355 #ifdef DISABLE_EDAC_SYSFS 1356 { 1357 return 0; 1358 } 1359 #else 1360 { 1361 int i; 1362 int err; 1363 struct csrow_info *csrow; 1364 struct kobject *edac_mci_kobj=&mci->edac_mci_kobj; 1365 1366 debugf0("%s() idx=%d\n", __func__, mci->mc_idx); 1367 memset(edac_mci_kobj, 0, sizeof(*edac_mci_kobj)); 1368 1369 /* set the name of the mc<id> object */ 1370 err = kobject_set_name(edac_mci_kobj,"mc%d",mci->mc_idx); 1371 1372 if (err) 1373 return err; 1374 1375 /* link to our parent the '..../edac/mc' object */ 1376 edac_mci_kobj->parent = &edac_memctrl_kobj; 1377 edac_mci_kobj->ktype = &ktype_mci; 1378 1379 /* register the mc<id> kobject */ 1380 err = kobject_register(edac_mci_kobj); 1381 1382 if (err) 1383 return err; 1384 1385 /* create a symlink for the device */ 1386 err = sysfs_create_link(edac_mci_kobj, &mci->dev->kobj, 1387 EDAC_DEVICE_SYMLINK); 1388 1389 if (err) 1390 goto fail0; 1391 1392 /* Make directories for each CSROW object 1393 * under the mc<id> kobject 1394 */ 1395 for (i = 0; i < mci->nr_csrows; i++) { 1396 csrow = &mci->csrows[i]; 1397 1398 /* Only expose populated CSROWs */ 1399 if (csrow->nr_pages > 0) { 1400 err = edac_create_csrow_object(edac_mci_kobj,csrow,i); 1401 1402 if (err) 1403 goto fail1; 1404 } 1405 } 1406 1407 return 0; 1408 1409 /* CSROW error: backout what has already been registered, */ 1410 fail1: 1411 for ( i--; i >= 0; i--) { 1412 if (csrow->nr_pages > 0) { 1413 init_completion(&csrow->kobj_complete); 1414 kobject_unregister(&mci->csrows[i].kobj); 1415 wait_for_completion(&csrow->kobj_complete); 1416 } 1417 } 1418 1419 fail0: 1420 init_completion(&mci->kobj_complete); 1421 kobject_unregister(edac_mci_kobj); 1422 wait_for_completion(&mci->kobj_complete); 1423 return err; 1424 } 1425 #endif /* DISABLE_EDAC_SYSFS */ 1426 1427 /* 1428 * remove a Memory Controller instance 1429 */ 1430 static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) 1431 { 1432 #ifndef DISABLE_EDAC_SYSFS 1433 int i; 1434 1435 debugf0("%s()\n", __func__); 1436 1437 /* remove all csrow kobjects */ 1438 for (i = 0; i < mci->nr_csrows; i++) { 1439 if (mci->csrows[i].nr_pages > 0) { 1440 init_completion(&mci->csrows[i].kobj_complete); 1441 kobject_unregister(&mci->csrows[i].kobj); 1442 wait_for_completion(&mci->csrows[i].kobj_complete); 1443 } 1444 } 1445 1446 sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK); 1447 init_completion(&mci->kobj_complete); 1448 kobject_unregister(&mci->edac_mci_kobj); 1449 wait_for_completion(&mci->kobj_complete); 1450 #endif /* DISABLE_EDAC_SYSFS */ 1451 } 1452 1453 /* END OF sysfs data and methods */ 1454 1455 #ifdef CONFIG_EDAC_DEBUG 1456 1457 void edac_mc_dump_channel(struct channel_info *chan) 1458 { 1459 debugf4("\tchannel = %p\n", chan); 1460 debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx); 1461 debugf4("\tchannel->ce_count = %d\n", chan->ce_count); 1462 debugf4("\tchannel->label = '%s'\n", chan->label); 1463 debugf4("\tchannel->csrow = %p\n\n", chan->csrow); 1464 } 1465 EXPORT_SYMBOL_GPL(edac_mc_dump_channel); 1466 1467 void edac_mc_dump_csrow(struct csrow_info *csrow) 1468 { 1469 debugf4("\tcsrow = %p\n", csrow); 1470 debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx); 1471 debugf4("\tcsrow->first_page = 0x%lx\n", 1472 csrow->first_page); 1473 debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page); 1474 debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask); 1475 debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages); 1476 debugf4("\tcsrow->nr_channels = %d\n", 1477 csrow->nr_channels); 1478 debugf4("\tcsrow->channels = %p\n", csrow->channels); 1479 debugf4("\tcsrow->mci = %p\n\n", csrow->mci); 1480 } 1481 EXPORT_SYMBOL_GPL(edac_mc_dump_csrow); 1482 1483 void edac_mc_dump_mci(struct mem_ctl_info *mci) 1484 { 1485 debugf3("\tmci = %p\n", mci); 1486 debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap); 1487 debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap); 1488 debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap); 1489 debugf4("\tmci->edac_check = %p\n", mci->edac_check); 1490 debugf3("\tmci->nr_csrows = %d, csrows = %p\n", 1491 mci->nr_csrows, mci->csrows); 1492 debugf3("\tdev = %p\n", mci->dev); 1493 debugf3("\tmod_name:ctl_name = %s:%s\n", 1494 mci->mod_name, mci->ctl_name); 1495 debugf3("\tpvt_info = %p\n\n", mci->pvt_info); 1496 } 1497 EXPORT_SYMBOL_GPL(edac_mc_dump_mci); 1498 1499 #endif /* CONFIG_EDAC_DEBUG */ 1500 1501 /* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'. 1502 * Adjust 'ptr' so that its alignment is at least as stringent as what the 1503 * compiler would provide for X and return the aligned result. 1504 * 1505 * If 'size' is a constant, the compiler will optimize this whole function 1506 * down to either a no-op or the addition of a constant to the value of 'ptr'. 1507 */ 1508 static inline char * align_ptr(void *ptr, unsigned size) 1509 { 1510 unsigned align, r; 1511 1512 /* Here we assume that the alignment of a "long long" is the most 1513 * stringent alignment that the compiler will ever provide by default. 1514 * As far as I know, this is a reasonable assumption. 1515 */ 1516 if (size > sizeof(long)) 1517 align = sizeof(long long); 1518 else if (size > sizeof(int)) 1519 align = sizeof(long); 1520 else if (size > sizeof(short)) 1521 align = sizeof(int); 1522 else if (size > sizeof(char)) 1523 align = sizeof(short); 1524 else 1525 return (char *) ptr; 1526 1527 r = size % align; 1528 1529 if (r == 0) 1530 return (char *) ptr; 1531 1532 return (char *) (((unsigned long) ptr) + align - r); 1533 } 1534 1535 /** 1536 * edac_mc_alloc: Allocate a struct mem_ctl_info structure 1537 * @size_pvt: size of private storage needed 1538 * @nr_csrows: Number of CWROWS needed for this MC 1539 * @nr_chans: Number of channels for the MC 1540 * 1541 * Everything is kmalloc'ed as one big chunk - more efficient. 1542 * Only can be used if all structures have the same lifetime - otherwise 1543 * you have to allocate and initialize your own structures. 1544 * 1545 * Use edac_mc_free() to free mc structures allocated by this function. 1546 * 1547 * Returns: 1548 * NULL allocation failed 1549 * struct mem_ctl_info pointer 1550 */ 1551 struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, 1552 unsigned nr_chans) 1553 { 1554 struct mem_ctl_info *mci; 1555 struct csrow_info *csi, *csrow; 1556 struct channel_info *chi, *chp, *chan; 1557 void *pvt; 1558 unsigned size; 1559 int row, chn; 1560 1561 /* Figure out the offsets of the various items from the start of an mc 1562 * structure. We want the alignment of each item to be at least as 1563 * stringent as what the compiler would provide if we could simply 1564 * hardcode everything into a single struct. 1565 */ 1566 mci = (struct mem_ctl_info *) 0; 1567 csi = (struct csrow_info *)align_ptr(&mci[1], sizeof(*csi)); 1568 chi = (struct channel_info *) 1569 align_ptr(&csi[nr_csrows], sizeof(*chi)); 1570 pvt = align_ptr(&chi[nr_chans * nr_csrows], sz_pvt); 1571 size = ((unsigned long) pvt) + sz_pvt; 1572 1573 if ((mci = kmalloc(size, GFP_KERNEL)) == NULL) 1574 return NULL; 1575 1576 /* Adjust pointers so they point within the memory we just allocated 1577 * rather than an imaginary chunk of memory located at address 0. 1578 */ 1579 csi = (struct csrow_info *) (((char *) mci) + ((unsigned long) csi)); 1580 chi = (struct channel_info *) (((char *) mci) + ((unsigned long) chi)); 1581 pvt = sz_pvt ? (((char *) mci) + ((unsigned long) pvt)) : NULL; 1582 1583 memset(mci, 0, size); /* clear all fields */ 1584 mci->csrows = csi; 1585 mci->pvt_info = pvt; 1586 mci->nr_csrows = nr_csrows; 1587 1588 for (row = 0; row < nr_csrows; row++) { 1589 csrow = &csi[row]; 1590 csrow->csrow_idx = row; 1591 csrow->mci = mci; 1592 csrow->nr_channels = nr_chans; 1593 chp = &chi[row * nr_chans]; 1594 csrow->channels = chp; 1595 1596 for (chn = 0; chn < nr_chans; chn++) { 1597 chan = &chp[chn]; 1598 chan->chan_idx = chn; 1599 chan->csrow = csrow; 1600 } 1601 } 1602 1603 return mci; 1604 } 1605 EXPORT_SYMBOL_GPL(edac_mc_alloc); 1606 1607 /** 1608 * edac_mc_free: Free a previously allocated 'mci' structure 1609 * @mci: pointer to a struct mem_ctl_info structure 1610 */ 1611 void edac_mc_free(struct mem_ctl_info *mci) 1612 { 1613 kfree(mci); 1614 } 1615 EXPORT_SYMBOL_GPL(edac_mc_free); 1616 1617 static struct mem_ctl_info *find_mci_by_dev(struct device *dev) 1618 { 1619 struct mem_ctl_info *mci; 1620 struct list_head *item; 1621 1622 debugf3("%s()\n", __func__); 1623 1624 list_for_each(item, &mc_devices) { 1625 mci = list_entry(item, struct mem_ctl_info, link); 1626 1627 if (mci->dev == dev) 1628 return mci; 1629 } 1630 1631 return NULL; 1632 } 1633 1634 /* Return 0 on success, 1 on failure. 1635 * Before calling this function, caller must 1636 * assign a unique value to mci->mc_idx. 1637 */ 1638 static int add_mc_to_global_list (struct mem_ctl_info *mci) 1639 { 1640 struct list_head *item, *insert_before; 1641 struct mem_ctl_info *p; 1642 1643 insert_before = &mc_devices; 1644 1645 if (unlikely((p = find_mci_by_dev(mci->dev)) != NULL)) 1646 goto fail0; 1647 1648 list_for_each(item, &mc_devices) { 1649 p = list_entry(item, struct mem_ctl_info, link); 1650 1651 if (p->mc_idx >= mci->mc_idx) { 1652 if (unlikely(p->mc_idx == mci->mc_idx)) 1653 goto fail1; 1654 1655 insert_before = item; 1656 break; 1657 } 1658 } 1659 1660 list_add_tail_rcu(&mci->link, insert_before); 1661 return 0; 1662 1663 fail0: 1664 edac_printk(KERN_WARNING, EDAC_MC, 1665 "%s (%s) %s %s already assigned %d\n", p->dev->bus_id, 1666 dev_name(p->dev), p->mod_name, p->ctl_name, p->mc_idx); 1667 return 1; 1668 1669 fail1: 1670 edac_printk(KERN_WARNING, EDAC_MC, 1671 "bug in low-level driver: attempt to assign\n" 1672 " duplicate mc_idx %d in %s()\n", p->mc_idx, __func__); 1673 return 1; 1674 } 1675 1676 static void complete_mc_list_del(struct rcu_head *head) 1677 { 1678 struct mem_ctl_info *mci; 1679 1680 mci = container_of(head, struct mem_ctl_info, rcu); 1681 INIT_LIST_HEAD(&mci->link); 1682 complete(&mci->complete); 1683 } 1684 1685 static void del_mc_from_global_list(struct mem_ctl_info *mci) 1686 { 1687 list_del_rcu(&mci->link); 1688 init_completion(&mci->complete); 1689 call_rcu(&mci->rcu, complete_mc_list_del); 1690 wait_for_completion(&mci->complete); 1691 } 1692 1693 /** 1694 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and 1695 * create sysfs entries associated with mci structure 1696 * @mci: pointer to the mci structure to be added to the list 1697 * @mc_idx: A unique numeric identifier to be assigned to the 'mci' structure. 1698 * 1699 * Return: 1700 * 0 Success 1701 * !0 Failure 1702 */ 1703 1704 /* FIXME - should a warning be printed if no error detection? correction? */ 1705 int edac_mc_add_mc(struct mem_ctl_info *mci, int mc_idx) 1706 { 1707 debugf0("%s()\n", __func__); 1708 mci->mc_idx = mc_idx; 1709 #ifdef CONFIG_EDAC_DEBUG 1710 if (edac_debug_level >= 3) 1711 edac_mc_dump_mci(mci); 1712 1713 if (edac_debug_level >= 4) { 1714 int i; 1715 1716 for (i = 0; i < mci->nr_csrows; i++) { 1717 int j; 1718 1719 edac_mc_dump_csrow(&mci->csrows[i]); 1720 for (j = 0; j < mci->csrows[i].nr_channels; j++) 1721 edac_mc_dump_channel( 1722 &mci->csrows[i].channels[j]); 1723 } 1724 } 1725 #endif 1726 down(&mem_ctls_mutex); 1727 1728 if (add_mc_to_global_list(mci)) 1729 goto fail0; 1730 1731 /* set load time so that error rate can be tracked */ 1732 mci->start_time = jiffies; 1733 1734 if (edac_create_sysfs_mci_device(mci)) { 1735 edac_mc_printk(mci, KERN_WARNING, 1736 "failed to create sysfs device\n"); 1737 goto fail1; 1738 } 1739 1740 /* Report action taken */ 1741 edac_mc_printk(mci, KERN_INFO, "Giving out device to %s %s: DEV %s\n", 1742 mci->mod_name, mci->ctl_name, dev_name(mci->dev)); 1743 1744 up(&mem_ctls_mutex); 1745 return 0; 1746 1747 fail1: 1748 del_mc_from_global_list(mci); 1749 1750 fail0: 1751 up(&mem_ctls_mutex); 1752 return 1; 1753 } 1754 EXPORT_SYMBOL_GPL(edac_mc_add_mc); 1755 1756 /** 1757 * edac_mc_del_mc: Remove sysfs entries for specified mci structure and 1758 * remove mci structure from global list 1759 * @pdev: Pointer to 'struct device' representing mci structure to remove. 1760 * 1761 * Return pointer to removed mci structure, or NULL if device not found. 1762 */ 1763 struct mem_ctl_info * edac_mc_del_mc(struct device *dev) 1764 { 1765 struct mem_ctl_info *mci; 1766 1767 debugf0("MC: %s()\n", __func__); 1768 down(&mem_ctls_mutex); 1769 1770 if ((mci = find_mci_by_dev(dev)) == NULL) { 1771 up(&mem_ctls_mutex); 1772 return NULL; 1773 } 1774 1775 edac_remove_sysfs_mci_device(mci); 1776 del_mc_from_global_list(mci); 1777 up(&mem_ctls_mutex); 1778 edac_printk(KERN_INFO, EDAC_MC, 1779 "Removed device %d for %s %s: DEV %s\n", mci->mc_idx, 1780 mci->mod_name, mci->ctl_name, dev_name(mci->dev)); 1781 return mci; 1782 } 1783 EXPORT_SYMBOL_GPL(edac_mc_del_mc); 1784 1785 void edac_mc_scrub_block(unsigned long page, unsigned long offset, u32 size) 1786 { 1787 struct page *pg; 1788 void *virt_addr; 1789 unsigned long flags = 0; 1790 1791 debugf3("%s()\n", __func__); 1792 1793 /* ECC error page was not in our memory. Ignore it. */ 1794 if(!pfn_valid(page)) 1795 return; 1796 1797 /* Find the actual page structure then map it and fix */ 1798 pg = pfn_to_page(page); 1799 1800 if (PageHighMem(pg)) 1801 local_irq_save(flags); 1802 1803 virt_addr = kmap_atomic(pg, KM_BOUNCE_READ); 1804 1805 /* Perform architecture specific atomic scrub operation */ 1806 atomic_scrub(virt_addr + offset, size); 1807 1808 /* Unmap and complete */ 1809 kunmap_atomic(virt_addr, KM_BOUNCE_READ); 1810 1811 if (PageHighMem(pg)) 1812 local_irq_restore(flags); 1813 } 1814 EXPORT_SYMBOL_GPL(edac_mc_scrub_block); 1815 1816 /* FIXME - should return -1 */ 1817 int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page) 1818 { 1819 struct csrow_info *csrows = mci->csrows; 1820 int row, i; 1821 1822 debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page); 1823 row = -1; 1824 1825 for (i = 0; i < mci->nr_csrows; i++) { 1826 struct csrow_info *csrow = &csrows[i]; 1827 1828 if (csrow->nr_pages == 0) 1829 continue; 1830 1831 debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) " 1832 "mask(0x%lx)\n", mci->mc_idx, __func__, 1833 csrow->first_page, page, csrow->last_page, 1834 csrow->page_mask); 1835 1836 if ((page >= csrow->first_page) && 1837 (page <= csrow->last_page) && 1838 ((page & csrow->page_mask) == 1839 (csrow->first_page & csrow->page_mask))) { 1840 row = i; 1841 break; 1842 } 1843 } 1844 1845 if (row == -1) 1846 edac_mc_printk(mci, KERN_ERR, 1847 "could not look up page error address %lx\n", 1848 (unsigned long) page); 1849 1850 return row; 1851 } 1852 EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page); 1853 1854 /* FIXME - setable log (warning/emerg) levels */ 1855 /* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */ 1856 void edac_mc_handle_ce(struct mem_ctl_info *mci, 1857 unsigned long page_frame_number, unsigned long offset_in_page, 1858 unsigned long syndrome, int row, int channel, const char *msg) 1859 { 1860 unsigned long remapped_page; 1861 1862 debugf3("MC%d: %s()\n", mci->mc_idx, __func__); 1863 1864 /* FIXME - maybe make panic on INTERNAL ERROR an option */ 1865 if (row >= mci->nr_csrows || row < 0) { 1866 /* something is wrong */ 1867 edac_mc_printk(mci, KERN_ERR, 1868 "INTERNAL ERROR: row out of range " 1869 "(%d >= %d)\n", row, mci->nr_csrows); 1870 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR"); 1871 return; 1872 } 1873 1874 if (channel >= mci->csrows[row].nr_channels || channel < 0) { 1875 /* something is wrong */ 1876 edac_mc_printk(mci, KERN_ERR, 1877 "INTERNAL ERROR: channel out of range " 1878 "(%d >= %d)\n", channel, 1879 mci->csrows[row].nr_channels); 1880 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR"); 1881 return; 1882 } 1883 1884 if (log_ce) 1885 /* FIXME - put in DIMM location */ 1886 edac_mc_printk(mci, KERN_WARNING, 1887 "CE page 0x%lx, offset 0x%lx, grain %d, syndrome " 1888 "0x%lx, row %d, channel %d, label \"%s\": %s\n", 1889 page_frame_number, offset_in_page, 1890 mci->csrows[row].grain, syndrome, row, channel, 1891 mci->csrows[row].channels[channel].label, msg); 1892 1893 mci->ce_count++; 1894 mci->csrows[row].ce_count++; 1895 mci->csrows[row].channels[channel].ce_count++; 1896 1897 if (mci->scrub_mode & SCRUB_SW_SRC) { 1898 /* 1899 * Some MC's can remap memory so that it is still available 1900 * at a different address when PCI devices map into memory. 1901 * MC's that can't do this lose the memory where PCI devices 1902 * are mapped. This mapping is MC dependant and so we call 1903 * back into the MC driver for it to map the MC page to 1904 * a physical (CPU) page which can then be mapped to a virtual 1905 * page - which can then be scrubbed. 1906 */ 1907 remapped_page = mci->ctl_page_to_phys ? 1908 mci->ctl_page_to_phys(mci, page_frame_number) : 1909 page_frame_number; 1910 1911 edac_mc_scrub_block(remapped_page, offset_in_page, 1912 mci->csrows[row].grain); 1913 } 1914 } 1915 EXPORT_SYMBOL_GPL(edac_mc_handle_ce); 1916 1917 void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg) 1918 { 1919 if (log_ce) 1920 edac_mc_printk(mci, KERN_WARNING, 1921 "CE - no information available: %s\n", msg); 1922 1923 mci->ce_noinfo_count++; 1924 mci->ce_count++; 1925 } 1926 EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info); 1927 1928 void edac_mc_handle_ue(struct mem_ctl_info *mci, 1929 unsigned long page_frame_number, unsigned long offset_in_page, 1930 int row, const char *msg) 1931 { 1932 int len = EDAC_MC_LABEL_LEN * 4; 1933 char labels[len + 1]; 1934 char *pos = labels; 1935 int chan; 1936 int chars; 1937 1938 debugf3("MC%d: %s()\n", mci->mc_idx, __func__); 1939 1940 /* FIXME - maybe make panic on INTERNAL ERROR an option */ 1941 if (row >= mci->nr_csrows || row < 0) { 1942 /* something is wrong */ 1943 edac_mc_printk(mci, KERN_ERR, 1944 "INTERNAL ERROR: row out of range " 1945 "(%d >= %d)\n", row, mci->nr_csrows); 1946 edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR"); 1947 return; 1948 } 1949 1950 chars = snprintf(pos, len + 1, "%s", 1951 mci->csrows[row].channels[0].label); 1952 len -= chars; 1953 pos += chars; 1954 1955 for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0); 1956 chan++) { 1957 chars = snprintf(pos, len + 1, ":%s", 1958 mci->csrows[row].channels[chan].label); 1959 len -= chars; 1960 pos += chars; 1961 } 1962 1963 if (log_ue) 1964 edac_mc_printk(mci, KERN_EMERG, 1965 "UE page 0x%lx, offset 0x%lx, grain %d, row %d, " 1966 "labels \"%s\": %s\n", page_frame_number, 1967 offset_in_page, mci->csrows[row].grain, row, labels, 1968 msg); 1969 1970 if (panic_on_ue) 1971 panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, " 1972 "row %d, labels \"%s\": %s\n", mci->mc_idx, 1973 page_frame_number, offset_in_page, 1974 mci->csrows[row].grain, row, labels, msg); 1975 1976 mci->ue_count++; 1977 mci->csrows[row].ue_count++; 1978 } 1979 EXPORT_SYMBOL_GPL(edac_mc_handle_ue); 1980 1981 void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg) 1982 { 1983 if (panic_on_ue) 1984 panic("EDAC MC%d: Uncorrected Error", mci->mc_idx); 1985 1986 if (log_ue) 1987 edac_mc_printk(mci, KERN_WARNING, 1988 "UE - no information available: %s\n", msg); 1989 mci->ue_noinfo_count++; 1990 mci->ue_count++; 1991 } 1992 EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info); 1993 1994 1995 /* 1996 * Iterate over all MC instances and check for ECC, et al, errors 1997 */ 1998 static inline void check_mc_devices(void) 1999 { 2000 struct list_head *item; 2001 struct mem_ctl_info *mci; 2002 2003 debugf3("%s()\n", __func__); 2004 down(&mem_ctls_mutex); 2005 2006 list_for_each(item, &mc_devices) { 2007 mci = list_entry(item, struct mem_ctl_info, link); 2008 2009 if (mci->edac_check != NULL) 2010 mci->edac_check(mci); 2011 } 2012 2013 up(&mem_ctls_mutex); 2014 } 2015 2016 /* 2017 * Check MC status every poll_msec. 2018 * Check PCI status every poll_msec as well. 2019 * 2020 * This where the work gets done for edac. 2021 * 2022 * SMP safe, doesn't use NMI, and auto-rate-limits. 2023 */ 2024 static void do_edac_check(void) 2025 { 2026 debugf3("%s()\n", __func__); 2027 check_mc_devices(); 2028 do_pci_parity_check(); 2029 } 2030 2031 static int edac_kernel_thread(void *arg) 2032 { 2033 while (!kthread_should_stop()) { 2034 do_edac_check(); 2035 2036 /* goto sleep for the interval */ 2037 schedule_timeout_interruptible((HZ * poll_msec) / 1000); 2038 try_to_freeze(); 2039 } 2040 2041 return 0; 2042 } 2043 2044 /* 2045 * edac_mc_init 2046 * module initialization entry point 2047 */ 2048 static int __init edac_mc_init(void) 2049 { 2050 edac_printk(KERN_INFO, EDAC_MC, EDAC_MC_VERSION "\n"); 2051 2052 /* 2053 * Harvest and clear any boot/initialization PCI parity errors 2054 * 2055 * FIXME: This only clears errors logged by devices present at time of 2056 * module initialization. We should also do an initial clear 2057 * of each newly hotplugged device. 2058 */ 2059 clear_pci_parity_errors(); 2060 2061 /* Create the MC sysfs entries */ 2062 if (edac_sysfs_memctrl_setup()) { 2063 edac_printk(KERN_ERR, EDAC_MC, 2064 "Error initializing sysfs code\n"); 2065 return -ENODEV; 2066 } 2067 2068 /* Create the PCI parity sysfs entries */ 2069 if (edac_sysfs_pci_setup()) { 2070 edac_sysfs_memctrl_teardown(); 2071 edac_printk(KERN_ERR, EDAC_MC, 2072 "EDAC PCI: Error initializing sysfs code\n"); 2073 return -ENODEV; 2074 } 2075 2076 /* create our kernel thread */ 2077 edac_thread = kthread_run(edac_kernel_thread, NULL, "kedac"); 2078 2079 if (IS_ERR(edac_thread)) { 2080 /* remove the sysfs entries */ 2081 edac_sysfs_memctrl_teardown(); 2082 edac_sysfs_pci_teardown(); 2083 return PTR_ERR(edac_thread); 2084 } 2085 2086 return 0; 2087 } 2088 2089 /* 2090 * edac_mc_exit() 2091 * module exit/termination functioni 2092 */ 2093 static void __exit edac_mc_exit(void) 2094 { 2095 debugf0("%s()\n", __func__); 2096 kthread_stop(edac_thread); 2097 2098 /* tear down the sysfs device */ 2099 edac_sysfs_memctrl_teardown(); 2100 edac_sysfs_pci_teardown(); 2101 } 2102 2103 module_init(edac_mc_init); 2104 module_exit(edac_mc_exit); 2105 2106 MODULE_LICENSE("GPL"); 2107 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n" 2108 "Based on work by Dan Hollis et al"); 2109 MODULE_DESCRIPTION("Core library routines for MC reporting"); 2110 2111 module_param(panic_on_ue, int, 0644); 2112 MODULE_PARM_DESC(panic_on_ue, "Panic on uncorrected error: 0=off 1=on"); 2113 #ifdef CONFIG_PCI 2114 module_param(check_pci_parity, int, 0644); 2115 MODULE_PARM_DESC(check_pci_parity, "Check for PCI bus parity errors: 0=off 1=on"); 2116 module_param(panic_on_pci_parity, int, 0644); 2117 MODULE_PARM_DESC(panic_on_pci_parity, "Panic on PCI Bus Parity error: 0=off 1=on"); 2118 #endif 2119 module_param(log_ue, int, 0644); 2120 MODULE_PARM_DESC(log_ue, "Log uncorrectable error to console: 0=off 1=on"); 2121 module_param(log_ce, int, 0644); 2122 MODULE_PARM_DESC(log_ce, "Log correctable error to console: 0=off 1=on"); 2123 module_param(poll_msec, int, 0644); 2124 MODULE_PARM_DESC(poll_msec, "Polling period in milliseconds"); 2125 #ifdef CONFIG_EDAC_DEBUG 2126 module_param(edac_debug_level, int, 0644); 2127 MODULE_PARM_DESC(edac_debug_level, "Debug level"); 2128 #endif 2129