
/*
 * edac_device.c
 * (C) 2007 www.douglaskthompson.com
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Doug Thompson <norsk5@xmission.com>
 *
 * edac_device API implementation
 * 19 Jan 2007
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <asm/uaccess.h>
#include <asm/page.h>

#include "edac_core.h"
#include "edac_module.h"

/* lock for the list: 'edac_device_list'; manipulation of this list
 * is protected by the 'device_ctls_mutex' lock
 */
static DEFINE_MUTEX(device_ctls_mutex);
static LIST_HEAD(edac_device_list);

#ifdef CONFIG_EDAC_DEBUG
static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
{
        edac_dbg(3, "\tedac_dev = %p dev_idx=%d\n",
                 edac_dev, edac_dev->dev_idx);
        edac_dbg(4, "\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
        edac_dbg(3, "\tdev = %p\n", edac_dev->dev);
        edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
                 edac_dev->mod_name, edac_dev->ctl_name);
        edac_dbg(3, "\tpvt_info = %p\n\n", edac_dev->pvt_info);
}
#endif                          /* CONFIG_EDAC_DEBUG */

/*
 * edac_device_alloc_ctl_info()
 *      Allocate a new edac device control info structure
 *
 *      The control structure is allocated in one complete chunk
 *      from the OS. It is in turn sub-allocated to the
 *      various objects that compose the structure.
 *
 *      The structure has a 'nr_instances' array within itself.
 *      Each instance represents a major component.
 *              Example: L1 cache and L2 cache are 2 instance components
 *
 *      Within each instance is an array of 'nr_blocks' block offsets.
 *      (An illustrative usage sketch follows this function.)
 */
struct edac_device_ctl_info *edac_device_alloc_ctl_info(
        unsigned sz_private,
        char *edac_device_name, unsigned nr_instances,
        char *edac_block_name, unsigned nr_blocks,
        unsigned offset_value,          /* zero, 1, or other based offset */
        struct edac_dev_sysfs_block_attribute *attrib_spec, unsigned nr_attrib,
        int device_index)
{
        struct edac_device_ctl_info *dev_ctl;
        struct edac_device_instance *dev_inst, *inst;
        struct edac_device_block *dev_blk, *blk_p, *blk;
        struct edac_dev_sysfs_block_attribute *dev_attrib, *attrib_p, *attrib;
        unsigned total_size;
        unsigned count;
        unsigned instance, block, attr;
        void *pvt, *p;
        int err;

        edac_dbg(4, "instances=%d blocks=%d\n", nr_instances, nr_blocks);

        /* Calculate the size of memory we need to allocate AND
         * determine the offsets of the various item arrays
         * (instance, block, attrib) from the start of an allocated structure.
         * We want the alignment of each item (instance, block, attrib)
         * to be at least as stringent as what the compiler would
         * provide if we could simply hardcode everything into a single struct.
         */
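/* The single allocation made below is laid out, in order, as:
 *
 *      [ struct edac_device_ctl_info ][ instances ][ blocks ][ attribs ][ pvt ]
 *
 * with each section aligned via edac_align_ptr() (the attribute and
 * private areas may be empty). Until the memory is actually allocated,
 * the computed 'pointers' are simply offsets from a NULL base.
 */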
        p = NULL;
        dev_ctl = edac_align_ptr(&p, sizeof(*dev_ctl), 1);

        /* Calc the 'end' offset past end of ONE ctl_info structure
         * which will become the start of the 'instance' array
         */
        dev_inst = edac_align_ptr(&p, sizeof(*dev_inst), nr_instances);

        /* Calc the 'end' offset past the instance array within the ctl_info
         * which will become the start of the block array
         */
        count = nr_instances * nr_blocks;
        dev_blk = edac_align_ptr(&p, sizeof(*dev_blk), count);

        /* Calc the 'end' offset past the dev_blk array
         * which will become the start of the attrib array, if any.
         */
        /* calc how many nr_attrib we need */
        if (nr_attrib > 0)
                count *= nr_attrib;
        dev_attrib = edac_align_ptr(&p, sizeof(*dev_attrib), count);

        /* Calc the 'end' offset past the attributes array */
        pvt = edac_align_ptr(&p, sz_private, 1);

        /* 'pvt' now points to where the private data area is.
         * At this point 'pvt' (like dev_inst, dev_blk and dev_attrib)
         * is baselined at ZERO
         */
        total_size = ((unsigned long)pvt) + sz_private;

        /* Allocate the amount of memory for the set of control structures */
        dev_ctl = kzalloc(total_size, GFP_KERNEL);
        if (dev_ctl == NULL)
                return NULL;

        /* Adjust pointers so they point within the actual memory we
         * just allocated rather than an imaginary chunk of memory
         * located at address 0.
         * 'dev_ctl' points to REAL memory, while the others are
         * ZERO based and thus need to be adjusted to point within
         * the allocated memory.
         */
        dev_inst = (struct edac_device_instance *)
                (((char *)dev_ctl) + ((unsigned long)dev_inst));
        dev_blk = (struct edac_device_block *)
                (((char *)dev_ctl) + ((unsigned long)dev_blk));
        dev_attrib = (struct edac_dev_sysfs_block_attribute *)
                (((char *)dev_ctl) + ((unsigned long)dev_attrib));
        pvt = sz_private ?
                (((char *)dev_ctl) + ((unsigned long)pvt)) : NULL;

        /* Begin storing the information into the control info structure */
        dev_ctl->dev_idx = device_index;
        dev_ctl->nr_instances = nr_instances;
        dev_ctl->instances = dev_inst;
        dev_ctl->pvt_info = pvt;

        /* Default logging of CEs and UEs */
        dev_ctl->log_ce = 1;
        dev_ctl->log_ue = 1;

        /* Name of this edac device */
        snprintf(dev_ctl->name, sizeof(dev_ctl->name), "%s", edac_device_name);

        edac_dbg(4, "edac_dev=%p next after end=%p\n",
                 dev_ctl, pvt + sz_private);

        /* Initialize every Instance */
        for (instance = 0; instance < nr_instances; instance++) {
                inst = &dev_inst[instance];
                inst->ctl = dev_ctl;
                inst->nr_blocks = nr_blocks;
                blk_p = &dev_blk[instance * nr_blocks];
                inst->blocks = blk_p;

                /* name of this instance */
                snprintf(inst->name, sizeof(inst->name),
                         "%s%u", edac_device_name, instance);

                /* Initialize every block in each instance */
                for (block = 0; block < nr_blocks; block++) {
                        blk = &blk_p[block];
                        blk->instance = inst;
                        snprintf(blk->name, sizeof(blk->name),
                                 "%s%d", edac_block_name, block + offset_value);

                        edac_dbg(4, "instance=%d inst_p=%p block=#%d block_p=%p name='%s'\n",
                                 instance, inst, block, blk, blk->name);

                        /* if there are NO attributes OR no attribute pointer
                         * then continue on to next block iteration
                         */
                        if ((nr_attrib == 0) || (attrib_spec == NULL))
                                continue;

                        /* setup the attribute array for this block */
                        blk->nr_attribs = nr_attrib;
                        attrib_p = &dev_attrib[(instance * nr_blocks + block)
                                                * nr_attrib];
                        blk->block_attributes = attrib_p;

                        edac_dbg(4, "THIS BLOCK_ATTRIB=%p\n",
                                 blk->block_attributes);

                        /* Initialize every user specified attribute in this
                         * block with the data the caller passed in.
                         * Each block gets its own copy of pointers,
                         * and its unique 'value'
                         */
                        for (attr = 0; attr < nr_attrib; attr++) {
                                attrib = &attrib_p[attr];

                                /* populate the unique per attrib
                                 * with the code pointers and info
                                 */
                                attrib->attr = attrib_spec[attr].attr;
                                attrib->show = attrib_spec[attr].show;
                                attrib->store = attrib_spec[attr].store;

                                attrib->block = blk;    /* up link */

                                edac_dbg(4, "alloc-attrib=%p attrib_name='%s' attrib-spec=%p spec-name=%s\n",
                                         attrib, attrib->attr.name,
                                         &attrib_spec[attr],
                                         attrib_spec[attr].attr.name);
                        }
                }
        }

        /* Mark this instance as merely ALLOCATED */
        dev_ctl->op_state = OP_ALLOC;

        /*
         * Initialize the 'root' kobj for the edac_device controller
         */
        err = edac_device_register_sysfs_main_kobj(dev_ctl);
        if (err) {
                kfree(dev_ctl);
                return NULL;
        }

        /* at this point, the root kobj is valid, and in order to
         * 'free' the object, then the function:
         *      edac_device_unregister_sysfs_main_kobj() must be called
         * which will perform kobj unregistration and the actual free
         * will occur during the kobject callback operation
         */

        return dev_ctl;
}
EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info);
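
/* Usage sketch (illustrative only, not compiled; "my_pvt_info" is a
 * hypothetical private struct): a driver describing one controller with
 * two instances, each containing four blocks numbered from 0, and no
 * extra per-block sysfs attributes, might allocate it as:
 *
 *      struct edac_device_ctl_info *edac_dev;
 *
 *      edac_dev = edac_device_alloc_ctl_info(sizeof(struct my_pvt_info),
 *                                            "cache", 2, "bank", 4,
 *                                            0, NULL, 0,
 *                                            edac_device_alloc_index());
 *      if (!edac_dev)
 *              return -ENOMEM;
 *
 * This produces instances "cache0" and "cache1", blocks "bank0".."bank3"
 * within each, and a private area reachable via edac_dev->pvt_info.
 */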

/*
 * edac_device_free_ctl_info()
 *      frees the memory allocated by the edac_device_alloc_ctl_info()
 *      function
 */
void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info)
{
        edac_device_unregister_sysfs_main_kobj(ctl_info);
}
EXPORT_SYMBOL_GPL(edac_device_free_ctl_info);

/*
 * find_edac_device_by_dev
 *      scans the edac_device list for a specific 'struct device *'
 *
 *      lock to be held prior to call: device_ctls_mutex
 *
 *      Return:
 *              pointer to control structure managing 'dev'
 *              NULL if not found on list
 */
static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev)
{
        struct edac_device_ctl_info *edac_dev;
        struct list_head *item;

        edac_dbg(0, "\n");

        list_for_each(item, &edac_device_list) {
                edac_dev = list_entry(item, struct edac_device_ctl_info, link);

                if (edac_dev->dev == dev)
                        return edac_dev;
        }

        return NULL;
}

/*
 * add_edac_dev_to_global_list
 *      Before calling this function, caller must
 *      assign a unique value to edac_dev->dev_idx.
 *
 *      lock to be held prior to call: device_ctls_mutex
 *
 *      Return:
 *              0 on success
 *              1 on failure.
 */
static int add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev)
{
        struct list_head *item, *insert_before;
        struct edac_device_ctl_info *rover;

        insert_before = &edac_device_list;

        /* Determine if already on the list */
        rover = find_edac_device_by_dev(edac_dev->dev);
        if (unlikely(rover != NULL))
                goto fail0;

        /* Insert in ascending order by 'dev_idx', so find position */
        list_for_each(item, &edac_device_list) {
                rover = list_entry(item, struct edac_device_ctl_info, link);

                if (rover->dev_idx >= edac_dev->dev_idx) {
                        if (unlikely(rover->dev_idx == edac_dev->dev_idx))
                                goto fail1;

                        insert_before = item;
                        break;
                }
        }

        list_add_tail_rcu(&edac_dev->link, insert_before);
        return 0;

fail0:
        edac_printk(KERN_WARNING, EDAC_MC,
                    "%s (%s) %s %s already assigned %d\n",
                    dev_name(rover->dev), edac_dev_name(rover),
                    rover->mod_name, rover->ctl_name, rover->dev_idx);
        return 1;

fail1:
        edac_printk(KERN_WARNING, EDAC_MC,
                    "bug in low-level driver: attempt to assign\n"
                    "    duplicate dev_idx %d in %s()\n", rover->dev_idx,
                    __func__);
        return 1;
}

/*
 * del_edac_device_from_global_list
 */
static void del_edac_device_from_global_list(struct edac_device_ctl_info
                                             *edac_device)
{
        list_del_rcu(&edac_device->link);

        /* these are for safe removal of devices from global list while
         * NMI handlers may be traversing list
         */
        synchronize_rcu();
        INIT_LIST_HEAD(&edac_device->link);
}

/*
 * edac_device_workq_function
 *      performs the operation scheduled by a workq request
 *
 *      this workq is embedded within an edac_device_ctl_info
 *      structure, that needs to be polled for possible error events.
 *
 *      This operation is to acquire the list mutex lock
 *      (thus preventing insertion or deletion)
 *      and then call the device's poll function IFF this device is
 *      running polled and there is a poll function defined.
 */
static void edac_device_workq_function(struct work_struct *work_req)
{
        struct delayed_work *d_work = to_delayed_work(work_req);
        struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work);

        mutex_lock(&device_ctls_mutex);

        /* If we are being removed, bail out immediately */
        if (edac_dev->op_state == OP_OFFLINE) {
                mutex_unlock(&device_ctls_mutex);
                return;
        }

        /* Only poll controllers that are running polled and have a check */
        if ((edac_dev->op_state == OP_RUNNING_POLL) &&
            (edac_dev->edac_check != NULL)) {
                edac_dev->edac_check(edac_dev);
        }

        mutex_unlock(&device_ctls_mutex);

        /* Reschedule the workq for the next time period to start again.
         * If the number of msec is for 1 sec, then adjust to the next
         * whole one second to save timers firing all over the period
         * between integral seconds
         */
        if (edac_dev->poll_msec == 1000)
                queue_delayed_work(edac_workqueue, &edac_dev->work,
                                   round_jiffies_relative(edac_dev->delay));
        else
                queue_delayed_work(edac_workqueue, &edac_dev->work,
                                   edac_dev->delay);
}

/*
 * edac_device_workq_setup
 *      initialize a workq item for this edac_device instance
 *      passing in the new delay period in msec
 */
void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
                             unsigned msec)
{
        edac_dbg(0, "\n");

        /* take the arg 'msec' and set it into the control structure
         * to be used in the time period calculation,
         * then calc the number of jiffies that represents
         */
        edac_dev->poll_msec = msec;
        edac_dev->delay = msecs_to_jiffies(msec);

        INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);

        /* optimize here for the 1 second case, which will be normal value, to
         * fire ON the 1 second time event. This helps reduce all sorts of
         * timers firing on sub-second basis, while they are happy
         * to fire together on the 1 second exactly
         */
        if (edac_dev->poll_msec == 1000)
                queue_delayed_work(edac_workqueue, &edac_dev->work,
                                   round_jiffies_relative(edac_dev->delay));
        else
                queue_delayed_work(edac_workqueue, &edac_dev->work,
                                   edac_dev->delay);
}

/*
 * edac_device_workq_teardown
 *      stop the workq processing on this edac_dev
 */
void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
{
        int status;

        if (!edac_dev->edac_check)
                return;

        status = cancel_delayed_work(&edac_dev->work);
        if (status == 0) {
                /* workq instance might be running, wait for it */
                flush_workqueue(edac_workqueue);
        }
}
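
/* Sketch of a driver-supplied check routine invoked by the polling
 * machinery above (illustrative only, not compiled; my_edac_check and
 * my_read_error_status are hypothetical):
 *
 *      static void my_edac_check(struct edac_device_ctl_info *edac_dev)
 *      {
 *              if (my_read_error_status(edac_dev))
 *                      edac_device_handle_ce(edac_dev, 0, 0,
 *                                            "corrected error");
 *      }
 *
 * A driver that sets edac_dev->edac_check before calling
 * edac_device_add_device() runs in OP_RUNNING_POLL mode, and the routine
 * is then called roughly every poll_msec milliseconds (1000 by default).
 */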

/*
 * edac_device_reset_delay_period
 *
 *      need to stop any outstanding workq queued up at this time
 *      because we will be resetting the sleep time.
 *      Then restart the workq on the new delay
 */
void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
                                    unsigned long value)
{
        /* cancel the current workq request, without the mutex lock */
        edac_device_workq_teardown(edac_dev);

        /* acquire the mutex before doing the workq setup */
        mutex_lock(&device_ctls_mutex);

        /* restart the workq request, with new delay value */
        edac_device_workq_setup(edac_dev, value);

        mutex_unlock(&device_ctls_mutex);
}

/*
 * edac_device_alloc_index: Allocate a unique device index number
 *
 * Return:
 *      allocated index number
 */
int edac_device_alloc_index(void)
{
        static atomic_t device_indexes = ATOMIC_INIT(0);

        return atomic_inc_return(&device_indexes) - 1;
}
EXPORT_SYMBOL_GPL(edac_device_alloc_index);

/**
 * edac_device_add_device: Insert the 'edac_dev' structure into the
 *      edac_device global list and create sysfs entries associated with
 *      the edac_device structure.
 * @edac_dev: pointer to the edac_device structure to be added to the list
 *
 * Return:
 *      0       Success
 *      !0      Failure
 */
int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
{
        edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
        if (edac_debug_level >= 3)
                edac_device_dump_device(edac_dev);
#endif
        mutex_lock(&device_ctls_mutex);

        if (add_edac_dev_to_global_list(edac_dev))
                goto fail0;

        /* set load time so that error rate can be tracked */
        edac_dev->start_time = jiffies;

        /* create this instance's sysfs entries */
        if (edac_device_create_sysfs(edac_dev)) {
                edac_device_printk(edac_dev, KERN_WARNING,
                                   "failed to create sysfs device\n");
                goto fail1;
        }

        /* If there IS a check routine, then we are running POLLED */
        if (edac_dev->edac_check != NULL) {
                /* This instance is NOW RUNNING */
                edac_dev->op_state = OP_RUNNING_POLL;

                /*
                 * enable workq processing on this instance,
                 * default = 1000 msec
                 */
                edac_device_workq_setup(edac_dev, 1000);
        } else {
                edac_dev->op_state = OP_RUNNING_INTERRUPT;
        }

        /* Report action taken */
        edac_device_printk(edac_dev, KERN_INFO,
                           "Giving out device to module %s controller %s: DEV %s (%s)\n",
                           edac_dev->mod_name, edac_dev->ctl_name,
                           edac_dev->dev_name,
                           edac_op_state_to_string(edac_dev->op_state));

        mutex_unlock(&device_ctls_mutex);
        return 0;

fail1:
        /* Some error, so remove the entry from the list */
        del_edac_device_from_global_list(edac_dev);

fail0:
        mutex_unlock(&device_ctls_mutex);
        return 1;
}
EXPORT_SYMBOL_GPL(edac_device_add_device);
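
/* Registration sketch (illustrative only, not compiled; the names and
 * the platform-device context are hypothetical): a driver's probe path
 * would typically fill in the identification fields and register, while
 * its remove path undoes both steps:
 *
 *      edac_dev->dev = &pdev->dev;
 *      edac_dev->mod_name = "my_edac";
 *      edac_dev->ctl_name = "my_ctl";
 *      edac_dev->dev_name = dev_name(&pdev->dev);
 *      edac_dev->edac_check = my_edac_check;   (omit for interrupt mode)
 *
 *      if (edac_device_add_device(edac_dev))
 *              goto err_free;
 *
 * and on removal:
 *
 *      edac_device_del_device(&pdev->dev);
 *      edac_device_free_ctl_info(edac_dev);
 */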

/**
 * edac_device_del_device:
 *      Remove sysfs entries for specified edac_device structure and
 *      then remove edac_device structure from global list
 *
 * @dev:
 *      Pointer to 'struct device' representing edac_device
 *      structure to remove.
 *
 * Return:
 *      Pointer to removed edac_device structure,
 *      OR NULL if device not found.
 */
struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
{
        struct edac_device_ctl_info *edac_dev;

        edac_dbg(0, "\n");

        mutex_lock(&device_ctls_mutex);

        /* Find the structure on the list, if not there, then leave */
        edac_dev = find_edac_device_by_dev(dev);
        if (edac_dev == NULL) {
                mutex_unlock(&device_ctls_mutex);
                return NULL;
        }

        /* mark this instance as OFFLINE */
        edac_dev->op_state = OP_OFFLINE;

        /* deregister from global list */
        del_edac_device_from_global_list(edac_dev);

        mutex_unlock(&device_ctls_mutex);

        /* clear workq processing on this instance */
        edac_device_workq_teardown(edac_dev);

        /* Tear down the sysfs entries for this instance */
        edac_device_remove_sysfs(edac_dev);

        edac_printk(KERN_INFO, EDAC_MC,
                    "Removed device %d for %s %s: DEV %s\n",
                    edac_dev->dev_idx,
                    edac_dev->mod_name, edac_dev->ctl_name,
                    edac_dev_name(edac_dev));

        return edac_dev;
}
EXPORT_SYMBOL_GPL(edac_device_del_device);

static inline int edac_device_get_log_ce(struct edac_device_ctl_info *edac_dev)
{
        return edac_dev->log_ce;
}

static inline int edac_device_get_log_ue(struct edac_device_ctl_info *edac_dev)
{
        return edac_dev->log_ue;
}

static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
                                              *edac_dev)
{
        return edac_dev->panic_on_ue;
}

/*
 * edac_device_handle_ce
 *      perform a common output and handling of an 'edac_dev' CE event
 */
void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
                           int inst_nr, int block_nr, const char *msg)
{
        struct edac_device_instance *instance;
        struct edac_device_block *block = NULL;

        if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
                edac_device_printk(edac_dev, KERN_ERR,
                                   "INTERNAL ERROR: 'instance' out of range "
                                   "(%d >= %d)\n", inst_nr,
                                   edac_dev->nr_instances);
                return;
        }

        instance = edac_dev->instances + inst_nr;

        if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
                edac_device_printk(edac_dev, KERN_ERR,
                                   "INTERNAL ERROR: instance %d 'block' "
                                   "out of range (%d >= %d)\n",
                                   inst_nr, block_nr,
                                   instance->nr_blocks);
                return;
        }

        if (instance->nr_blocks > 0) {
                block = instance->blocks + block_nr;
                block->counters.ce_count++;
        }

        /* Propagate the count up the 'totals' tree */
        instance->counters.ce_count++;
        edac_dev->counters.ce_count++;

        if (edac_device_get_log_ce(edac_dev))
                edac_device_printk(edac_dev, KERN_WARNING,
                                   "CE: %s instance: %s block: %s '%s'\n",
                                   edac_dev->ctl_name, instance->name,
                                   block ? block->name : "N/A", msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ce);

/*
 * edac_device_handle_ue
 *      perform a common output and handling of an 'edac_dev' UE event
 */
void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
                           int inst_nr, int block_nr, const char *msg)
{
        struct edac_device_instance *instance;
        struct edac_device_block *block = NULL;

        if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
                edac_device_printk(edac_dev, KERN_ERR,
                                   "INTERNAL ERROR: 'instance' out of range "
                                   "(%d >= %d)\n", inst_nr,
                                   edac_dev->nr_instances);
                return;
        }

        instance = edac_dev->instances + inst_nr;

        if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
                edac_device_printk(edac_dev, KERN_ERR,
                                   "INTERNAL ERROR: instance %d 'block' "
                                   "out of range (%d >= %d)\n",
                                   inst_nr, block_nr,
                                   instance->nr_blocks);
                return;
        }

        if (instance->nr_blocks > 0) {
                block = instance->blocks + block_nr;
                block->counters.ue_count++;
        }

        /* Propagate the count up the 'totals' tree */
        instance->counters.ue_count++;
        edac_dev->counters.ue_count++;

        if (edac_device_get_log_ue(edac_dev))
                edac_device_printk(edac_dev, KERN_EMERG,
                                   "UE: %s instance: %s block: %s '%s'\n",
                                   edac_dev->ctl_name, instance->name,
                                   block ? block->name : "N/A", msg);

        if (edac_device_get_panic_on_ue(edac_dev))
                panic("EDAC %s: UE instance: %s block %s '%s'\n",
                      edac_dev->ctl_name, instance->name,
                      block ? block->name : "N/A", msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ue);