// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * The file intends to implement the platform dependent EEH operations on pseries.
 * Actually, the pseries platform is built based on RTAS heavily. That means the
 * pseries platform dependent EEH operations will be built on RTAS calls. The functions
 * are derived from arch/powerpc/platforms/pseries/eeh.c and necessary cleanup has
 * been done.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011.
 * Copyright IBM Corporation 2001, 2005, 2006
 * Copyright Dave Engebretsen & Todd Inglett 2001
 * Copyright Linas Vepstas 2005, 2006
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/crash_dump.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>

/* RTAS tokens, resolved once at boot in eeh_pseries_init() */
static int ibm_set_eeh_option;
static int ibm_set_slot_reset;
static int ibm_read_slot_reset_state;
static int ibm_read_slot_reset_state2;
static int ibm_slot_error_detail;
static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_pe;

static void pseries_eeh_init_edev(struct pci_dn *pdn);

/*
 * pcibios_bus_add_device hook: initialise EEH state for a device that
 * appears on the bus (boot-time scan or hotplug), including SR-IOV
 * virtual functions, unless EEH was force-disabled on the command line.
 */
static void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);

	if (eeh_has_flag(EEH_FORCE_DISABLED))
		return;

	dev_dbg(&pdev->dev, "EEH: Setting up device\n");
#ifdef CONFIG_PCI_IOV
	if (pdev->is_virtfn) {
		/*
		 * VFs have no device-tree node of their own, so the IDs the
		 * generic probe relies on must be copied from the pci_dev.
		 */
		pdn->device_id  = pdev->device;
		pdn->vendor_id  = pdev->vendor;
		pdn->class_code = pdev->class;
		/*
		 * Last allow unfreeze return code used for retrieval
		 * by user space in eeh-sysfs to show the last command
		 * completion from platform.
		 */
		pdn->last_allow_rc = 0;
	}
#endif
	pseries_eeh_init_edev(pdn);
#ifdef CONFIG_PCI_IOV
	if (pdev->is_virtfn) {
		/*
		 * FIXME: This really should be handled by choosing the right
		 *        parent PE in pseries_eeh_init_edev().
		 */
		struct eeh_pe *physfn_pe = pci_dev_to_eeh_dev(pdev->physfn)->pe;
		struct eeh_dev *edev = pdn_to_eeh_dev(pdn);

		edev->pe_config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
		eeh_pe_tree_remove(edev); /* Remove as it is adding to bus pe */
		eeh_pe_tree_insert(edev, physfn_pe);   /* Add as VF PE type */
	}
#endif
	eeh_probe_device(pdev);
}


/**
 * pseries_eeh_get_pe_config_addr - Find the pe_config_addr for a device
 * @pdn: pci_dn of the input device
 *
 * The EEH RTAS calls use a tuple consisting of: (buid_hi, buid_lo,
 * pe_config_addr) as a handle to a given PE. This function finds the
 * pe_config_addr based on the device's config addr.
 *
 * Keep in mind that the pe_config_addr *might* be numerically identical to the
 * device's config addr, but the two are conceptually distinct.
 *
 * Returns the pe_config_addr, or a negative error code.
 */
static int pseries_eeh_get_pe_config_addr(struct pci_dn *pdn)
{
	int config_addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
	struct pci_controller *phb = pdn->phb;
	int ret, rets[3];

	if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
		/*
		 * First of all, use function 1 to determine if this device is
		 * part of a PE or not. ret[0] being zero indicates it's not.
		 */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				config_addr, BUID_HI(phb->buid),
				BUID_LO(phb->buid), 1);
		if (ret || (rets[0] == 0))
			return -ENOENT;

		/* Retrieve the associated PE config address with function 0 */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				config_addr, BUID_HI(phb->buid),
				BUID_LO(phb->buid), 0);
		if (ret) {
			pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
				__func__, phb->global_number, config_addr);
			return -ENXIO;
		}

		return rets[0];
	}

	if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
		/* Older firmware: single-function variant of the call */
		ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
				config_addr, BUID_HI(phb->buid),
				BUID_LO(phb->buid), 0);
		if (ret) {
			pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
				__func__, phb->global_number, config_addr);
			return -ENXIO;
		}

		return rets[0];
	}

	/*
	 * PAPR does describe a process for finding the pe_config_addr that was
	 * used before the ibm,get-config-addr-info calls were added. However,
	 * I haven't found *any* systems that don't have that RTAS call
	 * implemented. If you happen to find one that needs the old DT based
	 * process, patches are welcome!
	 */
	return -ENOENT;
}

/**
 * pseries_eeh_phb_reset - Reset the specified PHB
 * @phb: PCI controller
 * @config_addr: the associated config address
 * @option: reset option
 *
 * Reset the specified PHB/PE
 */
static int pseries_eeh_phb_reset(struct pci_controller *phb, int config_addr, int option)
{
	int ret;

	/* Reset PE through RTAS call */
	ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
			config_addr, BUID_HI(phb->buid),
			BUID_LO(phb->buid), option);

	/* If fundamental-reset not supported, try hot-reset */
	if (option == EEH_RESET_FUNDAMENTAL && ret == -8) {
		option = EEH_RESET_HOT;
		ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
				config_addr, BUID_HI(phb->buid),
				BUID_LO(phb->buid), option);
	}

	/* We need reset hold or settlement delay */
	if (option == EEH_RESET_FUNDAMENTAL || option == EEH_RESET_HOT)
		msleep(EEH_PE_RST_HOLD_TIME);
	else
		msleep(EEH_PE_RST_SETTLE_TIME);

	return ret;
}

/**
 * pseries_eeh_phb_configure_bridge - Configure PCI bridges in the indicated PE
 * @phb: PCI controller
 * @config_addr: the associated config address
 *
 * The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE would be recovered
 * again.
 */
static int pseries_eeh_phb_configure_bridge(struct pci_controller *phb, int config_addr)
{
	int ret;
	/* Waiting 0.2s maximum before skipping configuration */
	int max_wait = 200;

	while (max_wait > 0) {
		ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
				config_addr, BUID_HI(phb->buid),
				BUID_LO(phb->buid));

		if (!ret)
			return ret;
		if (ret < 0)
			break;

		/*
		 * If RTAS returns a delay value that's above 100ms, cut it
		 * down to 100ms in case firmware made a mistake.  For more
		 * on how these delay values work see rtas_busy_delay_time
		 */
		if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
		    ret <= RTAS_EXTENDED_DELAY_MAX)
			ret = RTAS_EXTENDED_DELAY_MIN+2;

		max_wait -= rtas_busy_delay_time(ret);

		if (max_wait < 0)
			break;

		rtas_busy_delay(ret);
	}

	pr_warn("%s: Unable to configure bridge PHB#%x-PE#%x (%d)\n",
		__func__, phb->global_number, config_addr, ret);
	/* PAPR defines -3 as "Parameter Error" for this function: */
	if (ret == -3)
		return -EINVAL;
	else
		return -EIO;
}

/*
 * Buffer for reporting slot-error-detail rtas calls. It's here
 * in BSS, and not dynamically alloced, so that it ends up in
 * RMO where RTAS can access it.
 */
static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(slot_errbuf_lock);
static int eeh_error_buf_size;

/*
 * Return the config-space offset of the start of the capability list
 * (PCI_CAPABILITY_LIST) if the device advertises one, otherwise 0.
 */
static int pseries_eeh_cap_start(struct pci_dn *pdn)
{
	u32 status;

	if (!pdn)
		return 0;

	rtas_pci_dn_read_config(pdn, PCI_STATUS, 2, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	return PCI_CAPABILITY_LIST;
}


/*
 * Walk the standard PCI capability list via RTAS config reads and
 * return the offset of capability @cap, or 0 if not found.
 */
static int pseries_eeh_find_cap(struct pci_dn *pdn, int cap)
{
	int pos = pseries_eeh_cap_start(pdn);
	int cnt = 48;	/* Maximal number of capabilities */
	u32 id;

	if (!pos)
		return 0;

	while (cnt--) {
		rtas_pci_dn_read_config(pdn, pos, 1, &pos);
		/* Offsets below 0x40 are not valid capability pointers */
		if (pos < 0x40)
			break;
		pos &= ~3;
		rtas_pci_dn_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}

	return 0;
}

/*
 * Walk the PCIe extended capability list (config space 0x100..0xfff)
 * and return the offset of extended capability @cap, or 0 if not found
 * or the device is not PCIe.
 */
static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 header;
	int pos = 256;			/* Extended config space starts at 0x100 */
	int ttl = (4096 - 256) / 8;	/* Bounds the walk against broken lists */

	if (!edev || !edev->pcie_cap)
		return 0;
	if (rtas_pci_dn_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
		return 0;
	else if (!header)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 256)
			break;

		if (rtas_pci_dn_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}

/**
 * pseries_eeh_pe_get_parent - Retrieve the parent PE
 * @edev: EEH device
 *
 * The whole PEs existing in the system are organized as hierarchy
 * tree. The function is used to retrieve the parent PE according
 * to the parent EEH device.
 */
static struct eeh_pe *pseries_eeh_pe_get_parent(struct eeh_dev *edev)
{
	struct eeh_dev *parent;
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);

	/*
	 * It might have the case for the indirect parent
	 * EEH device already having associated PE, but
	 * the direct parent EEH device doesn't have yet.
	 */
	if (edev->physfn)
		pdn = pci_get_pdn(edev->physfn);
	else
		pdn = pdn ? pdn->parent : NULL;
	while (pdn) {
		/* We're poking out of PCI territory */
		parent = pdn_to_eeh_dev(pdn);
		if (!parent)
			return NULL;

		if (parent->pe)
			return parent->pe;

		pdn = pdn->parent;
	}

	return NULL;
}

/**
 * pseries_eeh_init_edev - initialise the eeh_dev and eeh_pe for a pci_dn
 *
 * @pdn: PCI device node
 *
 * When we discover a new PCI device via the device-tree we create a
 * corresponding pci_dn and we allocate, but don't initialise, an eeh_dev.
 * This function takes care of the initialisation and inserts the eeh_dev
 * into the correct eeh_pe. If no eeh_pe exists we'll allocate one.
 */
static void pseries_eeh_init_edev(struct pci_dn *pdn)
{
	struct eeh_pe pe, *parent;
	struct eeh_dev *edev;
	u32 pcie_flags;
	int ret;

	if (WARN_ON_ONCE(!eeh_has_flag(EEH_PROBE_MODE_DEVTREE)))
		return;

	/*
	 * Find the eeh_dev for this pdn. The storage for the eeh_dev was
	 * allocated at the same time as the pci_dn.
	 *
	 * XXX: We should probably re-visit that.
	 */
	edev = pdn_to_eeh_dev(pdn);
	if (!edev)
		return;

	/*
	 * If ->pe is set then we've already probed this device. We hit
	 * this path when a pci_dev is removed and rescanned while recovering
	 * a PE (i.e. for devices where the driver doesn't support error
	 * recovery).
	 */
	if (edev->pe)
		return;

	/* Check class/vendor/device IDs */
	if (!pdn->vendor_id || !pdn->device_id || !pdn->class_code)
		return;

	/* Skip for PCI-ISA bridge */
	if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
		return;

	eeh_edev_dbg(edev, "Probing device\n");

	/*
	 * Update class code and mode of eeh device. We need
	 * correctly reflects that current device is root port
	 * or PCIe switch downstream port.
	 */
	edev->pcix_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
	edev->pcie_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
	edev->aer_cap = pseries_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
	edev->mode &= 0xFFFFFF00;
	if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
		edev->mode |= EEH_DEV_BRIDGE;
		if (edev->pcie_cap) {
			rtas_pci_dn_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
						2, &pcie_flags);
			pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
			if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
				edev->mode |= EEH_DEV_ROOT_PORT;
			else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
				edev->mode |= EEH_DEV_DS_PORT;
		}
	}

	/* first up, find the pe_config_addr for the PE containing the device */
	ret = pseries_eeh_get_pe_config_addr(pdn);
	if (ret < 0) {
		eeh_edev_dbg(edev, "Unable to find pe_config_addr\n");
		goto err;
	}

	/*
	 * Try enable EEH on the fake PE: a stack-local eeh_pe carrying just
	 * the (phb, addr) handle is enough for the set_option RTAS call.
	 */
	memset(&pe, 0, sizeof(struct eeh_pe));
	pe.phb = pdn->phb;
	pe.addr = ret;

	eeh_edev_dbg(edev, "Enabling EEH on device\n");
	ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
	if (ret) {
		eeh_edev_dbg(edev, "EEH failed to enable on device (code %d)\n", ret);
		goto err;
	}

	edev->pe_config_addr = pe.addr;

	eeh_add_flag(EEH_ENABLED);

	parent = pseries_eeh_pe_get_parent(edev);
	eeh_pe_tree_insert(edev, parent);
	eeh_save_bars(edev);
	eeh_edev_dbg(edev, "EEH enabled for device");

	return;

err:
	eeh_edev_dbg(edev, "EEH is unsupported on device (code = %d)\n", ret);
}

/*
 * eeh_ops->probe hook: return the eeh_dev for @pdev if EEH was
 * successfully set up for it in pseries_eeh_init_edev(), else NULL.
 */
static struct eeh_dev *pseries_eeh_probe(struct pci_dev *pdev)
{
	struct eeh_dev *edev;
	struct pci_dn *pdn;

	pdn = pci_get_pdn_by_devfn(pdev->bus, pdev->devfn);
	if (!pdn)
		return NULL;

	/*
	 * If the system supports EEH on this device then the eeh_dev was
	 * configured and inserted into a PE in pseries_eeh_init_edev()
	 */
	edev = pdn_to_eeh_dev(pdn);
	if (!edev || !edev->pe)
		return NULL;

	return edev;
}

/**
 * pseries_eeh_init_edev_recursive - Enable EEH for the indicated device
 * @pdn: PCI device node
 *
 * This routine must be used to perform EEH initialization for the
 * indicated PCI device that was added after system boot (e.g.
 * hotplug, dlpar).
 */
void pseries_eeh_init_edev_recursive(struct pci_dn *pdn)
{
	struct pci_dn *n;

	if (!pdn)
		return;

	/* Children first, so parent PEs can be found when needed */
	list_for_each_entry(n, &pdn->child_list, list)
		pseries_eeh_init_edev_recursive(n);

	pseries_eeh_init_edev(pdn);
}
EXPORT_SYMBOL_GPL(pseries_eeh_init_edev_recursive);

/**
 * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @pe: EEH PE
 * @option: operation to be issued
 *
 * The function is used to control the EEH functionality globally.
 * Currently, following options are support according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
 */
static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
{
	int ret = 0;

	/*
	 * When we're enabling or disabling EEH functionality on
	 * the particular PE, the PE config address is possibly
	 * unavailable. Therefore, we have to figure it out from
	 * the FDT node.
	 */
	switch (option) {
	case EEH_OPT_DISABLE:
	case EEH_OPT_ENABLE:
	case EEH_OPT_THAW_MMIO:
	case EEH_OPT_THAW_DMA:
		break;
	case EEH_OPT_FREEZE_PE:
		/* Not support */
		return 0;
	default:
		pr_err("%s: Invalid option %d\n", __func__, option);
		return -EINVAL;
	}

	ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
			pe->addr, BUID_HI(pe->phb->buid),
			BUID_LO(pe->phb->buid), option);

	return ret;
}

/**
 * pseries_eeh_get_state - Retrieve PE state
 * @pe: EEH PE
 * @delay: suggested time to wait if state is unavailable
 *
 * Retrieve the state of the specified PE.
On RTAS compliant
 * pseries platform, there already has one dedicated RTAS function
 * for the purpose. It's notable that the associated PE config address
 * might be ready when calling the function. Therefore, endeavour to
 * use the PE config address if possible. Further more, there're 2
 * RTAS calls for the purpose, we need to try the new one and back
 * to the old one if the new one couldn't work properly.
 */
static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay)
{
	int ret;
	int rets[4];
	int result;

	if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets,
				pe->addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) {
		/* Fake PE unavailable info */
		rets[2] = 0;
		ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
				pe->addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else {
		return EEH_STATE_NOT_SUPPORT;
	}

	if (ret)
		return ret;

	/* Parse the result out: rets[1] == 0 means EEH not supported here */
	if (!rets[1])
		return EEH_STATE_NOT_SUPPORT;

	/* rets[0] carries the PAPR-defined slot state */
	switch(rets[0]) {
	case 0:
		result = EEH_STATE_MMIO_ACTIVE |
			 EEH_STATE_DMA_ACTIVE;
		break;
	case 1:
		result = EEH_STATE_RESET_ACTIVE |
			 EEH_STATE_MMIO_ACTIVE  |
			 EEH_STATE_DMA_ACTIVE;
		break;
	case 2:
		result = 0;
		break;
	case 4:
		result = EEH_STATE_MMIO_ENABLED;
		break;
	case 5:
		/* Temporarily unavailable: rets[2] suggests a wait time */
		if (rets[2]) {
			if (delay)
				*delay = rets[2];
			result = EEH_STATE_UNAVAILABLE;
		} else {
			result = EEH_STATE_NOT_SUPPORT;
		}
		break;
	default:
		result = EEH_STATE_NOT_SUPPORT;
	}

	return result;
}

/**
 * pseries_eeh_reset - Reset the specified PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Reset the specified PE
 */
static int pseries_eeh_reset(struct eeh_pe *pe, int option)
{
	return pseries_eeh_phb_reset(pe->phb, pe->addr, option);
}

/**
 * pseries_eeh_get_log - Retrieve error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 * @drv_log: driver log to be combined with retrieved error log
 * @len: length of driver log
 *
 * Retrieve the temporary or permanent error from the PE.
 * Actually, the error will be retrieved through the dedicated
 * RTAS call.
 */
static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len)
{
	unsigned long flags;
	int ret;

	/* slot_errbuf lives in RMO so RTAS can write it; serialise access */
	spin_lock_irqsave(&slot_errbuf_lock, flags);
	memset(slot_errbuf, 0, eeh_error_buf_size);

	ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, pe->addr,
			BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid),
			virt_to_phys(drv_log), len,
			virt_to_phys(slot_errbuf), eeh_error_buf_size,
			severity);
	if (!ret)
		log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
	spin_unlock_irqrestore(&slot_errbuf_lock, flags);

	return ret;
}

/**
 * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 */
static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
{
	return pseries_eeh_phb_configure_bridge(pe->phb, pe->addr);
}

/**
 * pseries_eeh_read_config - Read PCI config space
 * @edev: EEH device handle
 * @where: PCI config space offset
 * @size: size to read
 * @val: return value
 *
 * Read config space from the specified device
 */
static int pseries_eeh_read_config(struct eeh_dev *edev, int where, int size, u32 *val)
{
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);

	return rtas_pci_dn_read_config(pdn, where, size, val);
}

/**
 * pseries_eeh_write_config - Write PCI config space
 * @edev: EEH device handle
 * @where: PCI config space offset
 * @size: size to write
 * @val: value to be written
 *
 * Write config
space to the specified device
 */
static int pseries_eeh_write_config(struct eeh_dev *edev, int where, int size, u32 val)
{
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);

	return rtas_pci_dn_write_config(pdn, where, size, val);
}

#ifdef CONFIG_PCI_IOV
/*
 * Issue the ibm,open-sriov-allow-unfreeze RTAS call for @cur_vfs VF PE
 * numbers in @vf_pe_array, on behalf of the PF described by @pdn.
 * The PE numbers are staged through the RMO-resident rtas_data_buf.
 */
static int pseries_send_allow_unfreeze(struct pci_dn *pdn, u16 *vf_pe_array, int cur_vfs)
{
	int rc;
	int ibm_allow_unfreeze = rtas_function_token(RTAS_FN_IBM_OPEN_SRIOV_ALLOW_UNFREEZE);
	unsigned long buid, addr;

	addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
	buid = pdn->phb->buid;
	spin_lock(&rtas_data_buf_lock);
	memcpy(rtas_data_buf, vf_pe_array, RTAS_DATA_BUF_SIZE);
	rc = rtas_call(ibm_allow_unfreeze, 5, 1, NULL,
		       addr,
		       BUID_HI(buid),
		       BUID_LO(buid),
		       rtas_data_buf, cur_vfs * sizeof(u16));
	spin_unlock(&rtas_data_buf_lock);
	if (rc)
		pr_warn("%s: Failed to allow unfreeze for PHB#%x-PE#%lx, rc=%x\n",
			__func__,
			pdn->phb->global_number, addr, rc);
	return rc;
}

/*
 * Allow-unfreeze for SR-IOV devices. Called on a PF: unfreeze all of its
 * VF PEs and propagate the result to each VF's pci_dn. Called on a VF:
 * unfreeze just that VF's PE via its parent PF.
 */
static int pseries_call_allow_unfreeze(struct eeh_dev *edev)
{
	int cur_vfs = 0, rc = 0, vf_index, bus, devfn, vf_pe_num;
	struct pci_dn *pdn, *tmp, *parent, *physfn_pdn;
	u16 *vf_pe_array;

	vf_pe_array = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!vf_pe_array)
		return -ENOMEM;
	if (pci_num_vf(edev->physfn ? edev->physfn : edev->pdev)) {
		if (edev->pdev->is_physfn) {
			cur_vfs = pci_num_vf(edev->pdev);
			pdn = eeh_dev_to_pdn(edev);
			parent = pdn->parent;
			for (vf_index = 0; vf_index < cur_vfs; vf_index++)
				vf_pe_array[vf_index] =
					cpu_to_be16(pdn->pe_num_map[vf_index]);
			rc = pseries_send_allow_unfreeze(pdn, vf_pe_array,
							 cur_vfs);
			pdn->last_allow_rc = rc;
			/* Mirror the result onto every VF's pci_dn for sysfs */
			for (vf_index = 0; vf_index < cur_vfs; vf_index++) {
				list_for_each_entry_safe(pdn, tmp,
							 &parent->child_list,
							 list) {
					bus = pci_iov_virtfn_bus(edev->pdev,
								 vf_index);
					devfn = pci_iov_virtfn_devfn(edev->pdev,
								     vf_index);
					if (pdn->busno != bus ||
					    pdn->devfn != devfn)
						continue;
					pdn->last_allow_rc = rc;
				}
			}
		} else {
			pdn = pci_get_pdn(edev->pdev);
			physfn_pdn = pci_get_pdn(edev->physfn);

			vf_pe_num = physfn_pdn->pe_num_map[edev->vf_index];
			vf_pe_array[0] = cpu_to_be16(vf_pe_num);
			rc = pseries_send_allow_unfreeze(physfn_pdn,
							 vf_pe_array, 1);
			pdn->last_allow_rc = rc;
		}
	}

	kfree(vf_pe_array);
	return rc;
}

/*
 * eeh_ops->notify_resume hook: on recovery resume, re-allow unfreeze
 * for SR-IOV PFs/VFs when the firmware supports the call.
 */
static int pseries_notify_resume(struct eeh_dev *edev)
{
	if (!edev)
		return -EEXIST;

	if (rtas_function_token(RTAS_FN_IBM_OPEN_SRIOV_ALLOW_UNFREEZE) == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	if (edev->pdev->is_physfn || edev->pdev->is_virtfn)
		return pseries_call_allow_unfreeze(edev);

	return 0;
}
#endif

/**
 * pseries_eeh_err_inject - Inject specified error to the indicated PE
 * @pe: the indicated PE
 * @type: error type
 * @func: specific error type
 * @addr: address
 * @mask: address mask
 * The routine is called to inject specified error, which is
 * determined by @type and @func, to the indicated PE
 */
static int pseries_eeh_err_inject(struct eeh_pe *pe, int type, int func,
				  unsigned long addr, unsigned long mask)
{
	struct eeh_dev *pdev;

	/* Check on PCI error type */
	if (type != EEH_ERR_TYPE_32 && type != EEH_ERR_TYPE_64)
		return -EINVAL;

	switch (func) {
	case EEH_ERR_FUNC_LD_MEM_ADDR:
	case EEH_ERR_FUNC_LD_MEM_DATA:
	case EEH_ERR_FUNC_ST_MEM_ADDR:
	case EEH_ERR_FUNC_ST_MEM_DATA:
		/* injects a MMIO error for all pdev's belonging to PE */
		pci_lock_rescan_remove();
		list_for_each_entry(pdev, &pe->edevs, entry)
			eeh_pe_inject_mmio_error(pdev->pdev);
		pci_unlock_rescan_remove();
		break;
	default:
		return -ERANGE;
	}

	return 0;
}

static struct eeh_ops pseries_eeh_ops = {
	.name			= "pseries",
	.probe			= pseries_eeh_probe,
	.set_option		= pseries_eeh_set_option,
	.get_state		= pseries_eeh_get_state,
	.reset			= pseries_eeh_reset,
	.get_log		= pseries_eeh_get_log,
	.configure_bridge	= pseries_eeh_configure_bridge,
	.err_inject		= pseries_eeh_err_inject,
	.read_config		= pseries_eeh_read_config,
	.write_config		= pseries_eeh_write_config,
	.next_error		= NULL,
	.restore_config		= NULL, /* NB: configure_bridge() does this */
#ifdef CONFIG_PCI_IOV
	.notify_resume		= pseries_notify_resume
#endif
};

/**
 * eeh_pseries_init - Register platform dependent EEH operations
 *
 * EEH initialization on pseries platform. This function should be
 * called before any EEH related functions.
 */
static int __init eeh_pseries_init(void)
{
	struct pci_controller *phb;
	struct pci_dn *pdn;
	int ret, config_addr;

	/* figure out EEH RTAS function call tokens */
	ibm_set_eeh_option		= rtas_function_token(RTAS_FN_IBM_SET_EEH_OPTION);
	ibm_set_slot_reset		= rtas_function_token(RTAS_FN_IBM_SET_SLOT_RESET);
	ibm_read_slot_reset_state2	= rtas_function_token(RTAS_FN_IBM_READ_SLOT_RESET_STATE2);
	ibm_read_slot_reset_state	= rtas_function_token(RTAS_FN_IBM_READ_SLOT_RESET_STATE);
	ibm_slot_error_detail		= rtas_function_token(RTAS_FN_IBM_SLOT_ERROR_DETAIL);
	ibm_get_config_addr_info2	= rtas_function_token(RTAS_FN_IBM_GET_CONFIG_ADDR_INFO2);
	ibm_get_config_addr_info	= rtas_function_token(RTAS_FN_IBM_GET_CONFIG_ADDR_INFO);
	ibm_configure_pe		= rtas_function_token(RTAS_FN_IBM_CONFIGURE_PE);

	/*
	 * ibm,configure-pe and ibm,configure-bridge have the same semantics,
	 * however ibm,configure-pe can be faster.  If we can't find
	 * ibm,configure-pe then fall back to using ibm,configure-bridge.
	 */
	if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE)
		ibm_configure_pe = rtas_function_token(RTAS_FN_IBM_CONFIGURE_BRIDGE);

	/*
	 * Necessary sanity check. We needn't check "get-config-addr-info"
	 * and its variant since the old firmware probably support address
	 * of domain/bus/slot/function for EEH RTAS operations.
	 */
	if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE		||
	    ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE		||
	    (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
	     ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE)	||
	    ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE	||
	    ibm_configure_pe == RTAS_UNKNOWN_SERVICE) {
		pr_info("EEH functionality not supported\n");
		return -EINVAL;
	}

	/* Initialize error log size */
	eeh_error_buf_size = rtas_get_error_log_max();

	/* Set EEH probe mode */
	eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG);

	/* Set EEH machine dependent code */
	ppc_md.pcibios_bus_add_device = pseries_pcibios_bus_add_device;

	/*
	 * In a kdump kernel (or with reset_devices) the devices may have
	 * been left in an undefined state by the crashed kernel, so reset
	 * every PHB's first PE before reusing them.
	 */
	if (is_kdump_kernel() || reset_devices) {
		pr_info("Issue PHB reset ...\n");
		list_for_each_entry(phb, &hose_list, list_node) {
			// Skip if the slot is empty
			if (list_empty(&PCI_DN(phb->dn)->child_list))
				continue;

			pdn = list_first_entry(&PCI_DN(phb->dn)->child_list, struct pci_dn, list);
			config_addr = pseries_eeh_get_pe_config_addr(pdn);

			/* invalid PE config addr */
			if (config_addr < 0)
				continue;

			pseries_eeh_phb_reset(phb, config_addr, EEH_RESET_FUNDAMENTAL);
			pseries_eeh_phb_reset(phb, config_addr, EEH_RESET_DEACTIVATE);
			pseries_eeh_phb_configure_bridge(phb, config_addr);
		}
	}

	ret = eeh_init(&pseries_eeh_ops);
	if (!ret)
		pr_info("EEH: pSeries platform initialized\n");
	else
		pr_info("EEH: pSeries platform initialization failure (%d)\n",
			ret);
	return ret;
}
machine_arch_initcall(pseries, eeh_pseries_init);