/*
 * This file implements the platform-dependent EEH operations on pseries.
 * The pseries platform relies heavily on RTAS, so the platform-dependent
 * EEH operations are built on RTAS calls. The functions are derived from
 * arch/powerpc/platforms/pseries/eeh.c and the necessary cleanup has
 * been done.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011.
 * Copyright IBM Corporation 2001, 2005, 2006
 * Copyright Dave Engebretsen & Todd Inglett 2001
 * Copyright Linas Vepstas 2005, 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>

/* RTAS tokens */
static int ibm_set_eeh_option;
static int ibm_set_slot_reset;
static int ibm_read_slot_reset_state;
static int ibm_read_slot_reset_state2;
static int ibm_slot_error_detail;
static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_pe;

/*
 * Buffer for reporting slot-error-detail RTAS calls. It's here
 * in BSS, and not dynamically allocated, so that it ends up in
 * the RMO where RTAS can access it.
 */
static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(slot_errbuf_lock);
static int eeh_error_buf_size;

/**
 * pseries_eeh_init - EEH platform dependent initialization
 *
 * EEH platform dependent initialization on pseries.
 */
static int pseries_eeh_init(void)
{
	/* figure out EEH RTAS function call tokens */
	ibm_set_eeh_option = rtas_token("ibm,set-eeh-option");
	ibm_set_slot_reset = rtas_token("ibm,set-slot-reset");
	ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2");
	ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state");
	ibm_slot_error_detail = rtas_token("ibm,slot-error-detail");
	ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
	ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
	ibm_configure_pe = rtas_token("ibm,configure-pe");
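
	/*
	 * Descriptive note: rtas_token() returns RTAS_UNKNOWN_SERVICE for
	 * any call the firmware does not advertise in the device tree.
	 * The checks below rely on that both to pick between the old and
	 * new call variants and to decide whether EEH can be supported
	 * at all.
	 */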

	/*
	 * ibm,configure-pe and ibm,configure-bridge have the same semantics,
	 * however ibm,configure-pe can be faster. If we can't find
	 * ibm,configure-pe then fall back to using ibm,configure-bridge.
	 */
	if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE)
		ibm_configure_pe = rtas_token("ibm,configure-bridge");

	/*
	 * Necessary sanity check. We needn't check "get-config-addr-info"
	 * and its variant since old firmware probably supports
	 * domain/bus/slot/function addressing for the EEH RTAS operations.
	 */
	if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE ||
	    ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE ||
	    (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
	     ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) ||
	    ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE ||
	    ibm_configure_pe == RTAS_UNKNOWN_SERVICE) {
		pr_info("EEH functionality not supported\n");
		return -EINVAL;
	}

	/* Initialize error log lock and size */
	spin_lock_init(&slot_errbuf_lock);
	eeh_error_buf_size = rtas_token("rtas-error-log-max");
	if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
		pr_info("%s: unknown EEH error log size\n",
			__func__);
		eeh_error_buf_size = 1024;
	} else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
		pr_info("%s: EEH error log size %d exceeds the maximal %d\n",
			__func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
		eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
	}

	/* Set EEH probe mode */
	eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG);

	return 0;
}

static int pseries_eeh_cap_start(struct pci_dn *pdn)
{
	u32 status;

	if (!pdn)
		return 0;

	rtas_read_config(pdn, PCI_STATUS, 2, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	return PCI_CAPABILITY_LIST;
}


static int pseries_eeh_find_cap(struct pci_dn *pdn, int cap)
{
	int pos = pseries_eeh_cap_start(pdn);
	int cnt = 48;	/* Maximal number of capabilities */
	u32 id;

	if (!pos)
		return 0;

	while (cnt--) {
		rtas_read_config(pdn, pos, 1, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		rtas_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}

	return 0;
}

static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 header;
	int pos = 256;
	int ttl = (4096 - 256) / 8;

	if (!edev || !edev->pcie_cap)
		return 0;
	if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
		return 0;
	else if (!header)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 256)
			break;

		if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
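
/*
 * Descriptive note: the helpers above mirror the capability walks done by
 * pci_find_capability() and pci_find_ext_capability(), but go through
 * rtas_read_config() on the pci_dn because the corresponding pci_dev may
 * not have been created yet at this point in boot. As a sketch of how they
 * are used, pseries_eeh_probe() below caches the offsets:
 *
 *	edev->pcie_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
 *	edev->aer_cap  = pseries_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
 */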

/**
 * pseries_eeh_probe - EEH probe on the given device
 * @pdn: PCI device node
 * @data: Unused
 *
 * When the EEH module is installed during system boot, all PCI devices
 * are checked one by one to see whether they support EEH. This function
 * is introduced for that purpose.
 */
static void *pseries_eeh_probe(struct pci_dn *pdn, void *data)
{
	struct eeh_dev *edev;
	struct eeh_pe pe;
	u32 pcie_flags;
	int enable = 0;
	int ret;

	/* Retrieve OF node and eeh device */
	edev = pdn_to_eeh_dev(pdn);
	if (!edev || edev->pe)
		return NULL;

	/* Check class/vendor/device IDs */
	if (!pdn->vendor_id || !pdn->device_id || !pdn->class_code)
		return NULL;

	/* Skip for PCI-ISA bridge */
	if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
		return NULL;

	/*
	 * Update the class code and mode of the eeh device. We need
	 * to correctly reflect whether the current device is a root
	 * port or a PCIe switch downstream port.
	 */
	edev->class_code = pdn->class_code;
	edev->pcix_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
	edev->pcie_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
	edev->aer_cap = pseries_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
	edev->mode &= 0xFFFFFF00;
	if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
		edev->mode |= EEH_DEV_BRIDGE;
		if (edev->pcie_cap) {
			rtas_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
					 2, &pcie_flags);
			pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
			if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
				edev->mode |= EEH_DEV_ROOT_PORT;
			else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
				edev->mode |= EEH_DEV_DS_PORT;
		}
	}

	/* Initialize the fake PE */
	memset(&pe, 0, sizeof(struct eeh_pe));
	pe.phb = edev->phb;
	pe.config_addr = (pdn->busno << 16) | (pdn->devfn << 8);

	/* Enable EEH on the device */
	ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
	if (!ret) {
		/* Retrieve PE address */
		edev->config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
		edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
		pe.addr = edev->pe_config_addr;

		/* Some older systems (Power4) allow the ibm,set-eeh-option
		 * call to succeed even on nodes where EEH is not supported.
		 * Verify support explicitly.
		 */
		ret = eeh_ops->get_state(&pe, NULL);
		if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT)
			enable = 1;

		if (enable) {
			eeh_add_flag(EEH_ENABLED);
			eeh_add_to_parent_pe(edev);

			pr_debug("%s: EEH enabled on %02x:%02x.%01x PHB#%x-PE#%x\n",
				 __func__, pdn->busno, PCI_SLOT(pdn->devfn),
				 PCI_FUNC(pdn->devfn), pe.phb->global_number,
				 pe.addr);
		} else if (pdn->parent && pdn_to_eeh_dev(pdn->parent) &&
			   (pdn_to_eeh_dev(pdn->parent))->pe) {
			/* This device doesn't support EEH, but it may have an
			 * EEH parent, in which case we mark it as supported.
			 */
			edev->config_addr = pdn_to_eeh_dev(pdn->parent)->config_addr;
			edev->pe_config_addr = pdn_to_eeh_dev(pdn->parent)->pe_config_addr;
			eeh_add_to_parent_pe(edev);
		}
	}

	/* Save memory bars */
	eeh_save_bars(edev);

	return NULL;
}
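
/*
 * Descriptive note on the addressing used below: the RTAS EEH calls
 * identify a device by a config address of the form
 * (bus << 16) | (devfn << 8), as set up in pseries_eeh_probe() above.
 * Once the PE address has been retrieved, the operations below prefer
 * it over the raw config address, which is why most of them start with:
 *
 *	config_addr = pe->config_addr;
 *	if (pe->addr)
 *		config_addr = pe->addr;
 */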

/**
 * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @pe: EEH PE
 * @option: operation to be issued
 *
 * The function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
 */
static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
{
	int ret = 0;
	int config_addr;

	/*
	 * When we're enabling or disabling EEH functionality on
	 * the particular PE, the PE config address is possibly
	 * unavailable. Therefore, we have to figure it out from
	 * the FDT node.
	 */
	switch (option) {
	case EEH_OPT_DISABLE:
	case EEH_OPT_ENABLE:
	case EEH_OPT_THAW_MMIO:
	case EEH_OPT_THAW_DMA:
		config_addr = pe->config_addr;
		if (pe->addr)
			config_addr = pe->addr;
		break;
	case EEH_OPT_FREEZE_PE:
		/* Not supported */
		return 0;
	default:
		pr_err("%s: Invalid option %d\n",
		       __func__, option);
		return -EINVAL;
	}

	ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
			config_addr, BUID_HI(pe->phb->buid),
			BUID_LO(pe->phb->buid), option);

	return ret;
}

/**
 * pseries_eeh_get_pe_addr - Retrieve PE address
 * @pe: EEH PE
 *
 * Retrieve the associated PE address. There are actually two RTAS
 * calls dedicated to this purpose; we try the new one first and then
 * fall back to the old one. Besides, make sure the config address has
 * been figured out from the FDT node before calling this function.
 *
 * Note that a zeroed return value means an invalid PE config
 * address.
 */
static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
{
	int ret = 0;
	int rets[3];

	if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
		/*
		 * First of all, we need to make sure there is one PE
		 * associated with the device. Otherwise, the PE address
		 * is meaningless.
		 */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 1);
		if (ret || (rets[0] == 0))
			return 0;

		/* Retrieve the associated PE config address */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 0);
		if (ret) {
			pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
				__func__, pe->phb->global_number, pe->config_addr);
			return 0;
		}

		return rets[0];
	}

	if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 0);
		if (ret) {
			pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
				__func__, pe->phb->global_number, pe->config_addr);
			return 0;
		}

		return rets[0];
	}

	return ret;
}
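
/*
 * Descriptive note: as interpreted by pseries_eeh_get_state() and
 * pseries_eeh_wait_state() below, the ibm,read-slot-reset-state{,2}
 * calls return: rets[0] - the PE state, rets[1] - non-zero when EEH is
 * actually supported on the slot, and rets[2] - the suggested wait time
 * in milliseconds when the state is temporarily unavailable. A sketch of
 * how the switch statement below maps rets[0] to the EEH_STATE_* flags:
 *
 *	0 - MMIO and DMA active (normal operation)
 *	1 - reset active, MMIO and DMA active
 *	2 - frozen, neither MMIO nor DMA active
 *	4 - MMIO enabled
 *	5 - state temporarily unavailable
 */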

/**
 * pseries_eeh_get_state - Retrieve PE state
 * @pe: EEH PE
 * @state: return value
 *
 * Retrieve the state of the specified PE. On an RTAS-compliant
 * pseries platform, there is already a dedicated RTAS function for
 * this purpose. Note that the associated PE config address might be
 * ready when calling the function, so endeavour to use the PE config
 * address if possible. Furthermore, there are two RTAS calls for the
 * purpose; we need to try the new one and fall back to the old one
 * if the new one doesn't work properly.
 */
static int pseries_eeh_get_state(struct eeh_pe *pe, int *state)
{
	int config_addr;
	int ret;
	int rets[4];
	int result;

	/* Figure out PE config address if possible */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) {
		/* Fake PE unavailable info */
		rets[2] = 0;
		ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else {
		return EEH_STATE_NOT_SUPPORT;
	}

	if (ret)
		return ret;

	/* Parse the result out */
	if (!rets[1])
		return EEH_STATE_NOT_SUPPORT;

	switch (rets[0]) {
	case 0:
		result = EEH_STATE_MMIO_ACTIVE |
			 EEH_STATE_DMA_ACTIVE;
		break;
	case 1:
		result = EEH_STATE_RESET_ACTIVE |
			 EEH_STATE_MMIO_ACTIVE |
			 EEH_STATE_DMA_ACTIVE;
		break;
	case 2:
		result = 0;
		break;
	case 4:
		result = EEH_STATE_MMIO_ENABLED;
		break;
	case 5:
		if (rets[2]) {
			if (state)
				*state = rets[2];
			result = EEH_STATE_UNAVAILABLE;
		} else {
			result = EEH_STATE_NOT_SUPPORT;
		}
		break;
	default:
		result = EEH_STATE_NOT_SUPPORT;
	}

	return result;
}

/**
 * pseries_eeh_reset - Reset the specified PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Reset the specified PE
 */
static int pseries_eeh_reset(struct eeh_pe *pe, int option)
{
	int config_addr;
	int ret;

	/* Figure out PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	/* Reset PE through RTAS call */
	ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
			config_addr, BUID_HI(pe->phb->buid),
			BUID_LO(pe->phb->buid), option);

	/* If fundamental-reset not supported, try hot-reset */
	if (option == EEH_RESET_FUNDAMENTAL &&
	    ret == -8) {
		option = EEH_RESET_HOT;
		ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), option);
	}

	/* We need reset hold or settlement delay */
	if (option == EEH_RESET_FUNDAMENTAL ||
	    option == EEH_RESET_HOT)
		msleep(EEH_PE_RST_HOLD_TIME);
	else
		msleep(EEH_PE_RST_SETTLE_TIME);

	return ret;
}

/**
 * pseries_eeh_wait_state - Wait for PE state
 * @pe: EEH PE
 * @max_wait: maximal period in milliseconds
 *
 * Wait for the state of the associated PE. It might take some time
 * to retrieve the PE's state.
 */
static int pseries_eeh_wait_state(struct eeh_pe *pe, int max_wait)
{
	int ret;
	int mwait;

	/*
	 * According to PAPR, the state of a PE might be temporarily
	 * unavailable. In that case, we have to wait for the time
	 * indicated by firmware. The maximal wait time is 5 minutes,
	 * which is taken from the original EEH implementation. The
	 * original implementation also defined the minimal wait time
	 * as 1 second.
	 */
#define EEH_STATE_MIN_WAIT_TIME	(1000)
#define EEH_STATE_MAX_WAIT_TIME	(300 * 1000)

	while (1) {
		ret = pseries_eeh_get_state(pe, &mwait);

		/*
		 * If the PE's state is temporarily unavailable,
		 * we have to wait for the specified time. Otherwise,
		 * the PE's state will be returned immediately.
		 */
		if (ret != EEH_STATE_UNAVAILABLE)
			return ret;

		if (max_wait <= 0) {
			pr_warn("%s: Timeout when getting PE's state (%d)\n",
				__func__, max_wait);
			return EEH_STATE_NOT_SUPPORT;
		}

		if (mwait <= 0) {
			pr_warn("%s: Firmware returned bad wait value %d\n",
				__func__, mwait);
			mwait = EEH_STATE_MIN_WAIT_TIME;
		} else if (mwait > EEH_STATE_MAX_WAIT_TIME) {
			pr_warn("%s: Firmware returned too long wait value %d\n",
				__func__, mwait);
			mwait = EEH_STATE_MAX_WAIT_TIME;
		}

		max_wait -= mwait;
		msleep(mwait);
	}

	return EEH_STATE_NOT_SUPPORT;
}

/**
 * pseries_eeh_get_log - Retrieve error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 * @drv_log: driver log to be combined with retrieved error log
 * @len: length of driver log
 *
 * Retrieve the temporary or permanent error log from the PE.
 * The error is retrieved through the dedicated RTAS call.
 */
static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len)
{
	int config_addr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&slot_errbuf_lock, flags);
	memset(slot_errbuf, 0, eeh_error_buf_size);

	/* Figure out the PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr,
			BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid),
			virt_to_phys(drv_log), len,
			virt_to_phys(slot_errbuf), eeh_error_buf_size,
			severity);
	if (!ret)
		log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
	spin_unlock_irqrestore(&slot_errbuf_lock, flags);

	return ret;
}
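
/*
 * Descriptive note: ibm,configure-pe may come back with an RTAS busy or
 * extended-delay status (RTAS_EXTENDED_DELAY_MIN..RTAS_EXTENDED_DELAY_MAX).
 * rtas_busy_delay_time() translates such a status into a delay in
 * milliseconds and rtas_busy_delay() sleeps for it, which is what the retry
 * loop below builds on; the loop gives up once roughly 200ms have been
 * spent in total.
 */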

/**
 * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 * The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE can be
 * recovered.
 */
static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
{
	int config_addr;
	int ret;
	/* Waiting 0.2s maximum before skipping configuration */
	int max_wait = 200;

	/* Figure out the PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	while (max_wait > 0) {
		ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));

		if (!ret)
			return ret;

		/*
		 * If RTAS returns a delay value that's above 100ms, cut it
		 * down to 100ms in case firmware made a mistake. For more
		 * on how these delay values work see rtas_busy_delay_time().
		 */
		if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
		    ret <= RTAS_EXTENDED_DELAY_MAX)
			ret = RTAS_EXTENDED_DELAY_MIN+2;

		max_wait -= rtas_busy_delay_time(ret);

		if (max_wait < 0)
			break;

		rtas_busy_delay(ret);
	}

	pr_warn("%s: Unable to configure bridge PHB#%x-PE#%x (%d)\n",
		__func__, pe->phb->global_number, pe->addr, ret);
	return ret;
}

/**
 * pseries_eeh_read_config - Read PCI config space
 * @pdn: PCI device node
 * @where: PCI address
 * @size: size to read
 * @val: return value
 *
 * Read config space from the specified device
 */
static int pseries_eeh_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
{
	return rtas_read_config(pdn, where, size, val);
}

/**
 * pseries_eeh_write_config - Write PCI config space
 * @pdn: PCI device node
 * @where: PCI address
 * @size: size to write
 * @val: value to be written
 *
 * Write config space to the specified device
 */
static int pseries_eeh_write_config(struct pci_dn *pdn, int where, int size, u32 val)
{
	return rtas_write_config(pdn, where, size, val);
}

static struct eeh_ops pseries_eeh_ops = {
	.name			= "pseries",
	.init			= pseries_eeh_init,
	.probe			= pseries_eeh_probe,
	.set_option		= pseries_eeh_set_option,
	.get_pe_addr		= pseries_eeh_get_pe_addr,
	.get_state		= pseries_eeh_get_state,
	.reset			= pseries_eeh_reset,
	.wait_state		= pseries_eeh_wait_state,
	.get_log		= pseries_eeh_get_log,
	.configure_bridge	= pseries_eeh_configure_bridge,
	.err_inject		= NULL,
	.read_config		= pseries_eeh_read_config,
	.write_config		= pseries_eeh_write_config,
	.next_error		= NULL,
	.restore_config		= NULL
};

/**
 * eeh_pseries_init - Register platform dependent EEH operations
 *
 * EEH initialization on pseries platform. This function should be
 * called before any EEH related functions.
 */
static int __init eeh_pseries_init(void)
{
	int ret;

	ret = eeh_ops_register(&pseries_eeh_ops);
	if (!ret)
		pr_info("EEH: pSeries platform initialized\n");
	else
		pr_info("EEH: pSeries platform initialization failure (%d)\n",
			ret);

	return ret;
}
machine_early_initcall(pseries, eeh_pseries_init);