// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
 * Copyright IBM Corp. 2004 2005
 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
 *
 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
#include <asm/pci-bridge.h>
#include <asm/rtas.h>

struct eeh_rmv_data {
	struct list_head removed_vf_list;
	int removed_dev_count;
};

static int eeh_result_priority(enum pci_ers_result result)
{
	switch (result) {
	case PCI_ERS_RESULT_NONE:
		return 1;
	case PCI_ERS_RESULT_NO_AER_DRIVER:
		return 2;
	case PCI_ERS_RESULT_RECOVERED:
		return 3;
	case PCI_ERS_RESULT_CAN_RECOVER:
		return 4;
	case PCI_ERS_RESULT_DISCONNECT:
		return 5;
	case PCI_ERS_RESULT_NEED_RESET:
		return 6;
	default:
		WARN_ONCE(1, "Unknown pci_ers_result value: %d\n", result);
		return 0;
	}
}

static const char *pci_ers_result_name(enum pci_ers_result result)
{
	switch (result) {
	case PCI_ERS_RESULT_NONE:
		return "none";
	case PCI_ERS_RESULT_CAN_RECOVER:
		return "can recover";
	case PCI_ERS_RESULT_NEED_RESET:
		return "need reset";
	case PCI_ERS_RESULT_DISCONNECT:
		return "disconnect";
	case PCI_ERS_RESULT_RECOVERED:
		return "recovered";
	case PCI_ERS_RESULT_NO_AER_DRIVER:
		return "no AER driver";
	default:
		WARN_ONCE(1, "Unknown result type: %d\n", result);
		return "unknown";
	}
}

/* Merge two recovery results, keeping whichever has the higher priority. */
static enum pci_ers_result pci_ers_merge_result(enum pci_ers_result old,
						enum pci_ers_result new)
{
	if (eeh_result_priority(new) > eeh_result_priority(old))
		return new;
	return old;
}

static bool eeh_dev_removed(struct eeh_dev *edev)
{
	return !edev || (edev->mode & EEH_DEV_REMOVED);
}

static bool eeh_edev_actionable(struct eeh_dev *edev)
{
	if (!edev->pdev)
		return false;
	if (edev->pdev->error_state == pci_channel_io_perm_failure)
		return false;
	if (eeh_dev_removed(edev))
		return false;
	if (eeh_pe_passed(edev->pe))
		return false;

	return true;
}

/**
 * eeh_pcid_get - Get the PCI device driver
 * @pdev: PCI device
 *
 * Retrieve the PCI device driver for the indicated PCI device. We also
 * take a reference on the driver's module so that it cannot be unloaded
 * while we are using it; otherwise the kernel could crash.
 */
static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
{
	if (!pdev || !pdev->dev.driver)
		return NULL;

	if (!try_module_get(pdev->dev.driver->owner))
		return NULL;

	return to_pci_driver(pdev->dev.driver);
}
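
/*
 * Illustrative usage (summary only, not compiled separately): each
 * successful eeh_pcid_get() must be balanced by an eeh_pcid_put() once
 * the driver is no longer needed, as the report and removal paths in
 * this file do:
 *
 *	driver = eeh_pcid_get(pdev);
 *	if (driver) {
 *		if (driver->err_handler)
 *			... invoke the relevant error handler callback ...
 *		eeh_pcid_put(pdev);
 *	}
 */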

/**
 * eeh_pcid_put - Dereference on the PCI device driver
 * @pdev: PCI device
 *
 * Drop the module reference taken by eeh_pcid_get() on the driver of
 * the indicated PCI device.
 */
static inline void eeh_pcid_put(struct pci_dev *pdev)
{
	if (!pdev || !pdev->dev.driver)
		return;

	module_put(pdev->dev.driver->owner);
}

/**
 * eeh_disable_irq - Disable interrupt for the recovering device
 * @edev: eeh device
 *
 * This routine must be called when reporting a temporary or permanent
 * error to a PCI device, to disable that device's interrupt. If the
 * device uses MSI or MSI-X, no real work is needed because EEH freezes
 * DMA for devices hitting EEH errors, which effectively blocks MSI and
 * MSI-X as well.
 */
static void eeh_disable_irq(struct eeh_dev *edev)
{
	/* Don't disable MSI and MSI-X interrupts. They are
	 * effectively disabled by the DMA Stopped state
	 * when an EEH error occurs.
	 */
	if (edev->pdev->msi_enabled || edev->pdev->msix_enabled)
		return;

	if (!irq_has_action(edev->pdev->irq))
		return;

	edev->mode |= EEH_DEV_IRQ_DISABLED;
	disable_irq_nosync(edev->pdev->irq);
}

/**
 * eeh_enable_irq - Enable interrupt for the recovering device
 * @edev: eeh device
 *
 * This routine must be called to re-enable the interrupt when the failed
 * device is ready to be resumed.
 */
static void eeh_enable_irq(struct eeh_dev *edev)
{
	if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
		/*
		 * FIXME !!!!!
		 *
		 * This is just ass backwards. This maze has
		 * unbalanced irq_enable/disable calls. So instead of
		 * finding the root cause it works around the warning
		 * in the irq_enable code by conditionally calling
		 * into it.
		 *
		 * That's just wrong.The warning in the core code is
		 * there to tell people to fix their asymmetries in
		 * their own code, not by abusing the core information
		 * to avoid it.
		 *
		 * I so wish that the assymetry would be the other way
		 * round and a few more irq_disable calls render that
		 * shit unusable forever.
		 *
		 * tglx
		 */
		if (irqd_irq_disabled(irq_get_irq_data(edev->pdev->irq)))
			enable_irq(edev->pdev->irq);
	}
}

static void eeh_dev_save_state(struct eeh_dev *edev, void *userdata)
{
	struct pci_dev *pdev;

	if (!edev)
		return;

	/*
	 * The config space of some adapters cannot be accessed while the
	 * error is pending; doing so would cause a fenced PHB. For those
	 * devices we don't save the current config space here and instead
	 * restore from the initial config space saved when the EEH device
	 * was created.
	 */
	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))
		return;

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return;

	pci_save_state(pdev);
}

static void eeh_set_channel_state(struct eeh_pe *root, pci_channel_state_t s)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev, *tmp;

	eeh_for_each_pe(root, pe)
		eeh_pe_for_each_dev(pe, edev, tmp)
			if (eeh_edev_actionable(edev))
				edev->pdev->error_state = s;
}

static void eeh_set_irq_state(struct eeh_pe *root, bool enable)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev, *tmp;

	eeh_for_each_pe(root, pe) {
		eeh_pe_for_each_dev(pe, edev, tmp) {
			if (!eeh_edev_actionable(edev))
				continue;

			if (!eeh_pcid_get(edev->pdev))
				continue;

			if (enable)
				eeh_enable_irq(edev);
			else
				eeh_disable_irq(edev);

			eeh_pcid_put(edev->pdev);
		}
	}
}

typedef enum pci_ers_result (*eeh_report_fn)(struct eeh_dev *,
					     struct pci_dev *,
					     struct pci_driver *);
static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn,
			       enum pci_ers_result *result)
{
	struct pci_dev *pdev;
	struct pci_driver *driver;
	enum pci_ers_result new_result;

	pdev = edev->pdev;
	if (pdev)
		get_device(&pdev->dev);
	if (!pdev) {
		eeh_edev_info(edev, "no device");
		*result = PCI_ERS_RESULT_DISCONNECT;
		return;
	}
	device_lock(&pdev->dev);
	if (eeh_edev_actionable(edev)) {
		driver = eeh_pcid_get(pdev);

		if (!driver)
			eeh_edev_info(edev, "no driver");
		else if (!driver->err_handler)
			eeh_edev_info(edev, "driver not EEH aware");
		else if (edev->mode & EEH_DEV_NO_HANDLER)
			eeh_edev_info(edev, "driver bound too late");
		else {
			new_result = fn(edev, pdev, driver);
			eeh_edev_info(edev, "%s driver reports: '%s'",
				      driver->name,
				      pci_ers_result_name(new_result));
			if (result)
				*result = pci_ers_merge_result(*result,
							       new_result);
		}
		if (driver)
			eeh_pcid_put(pdev);
	} else {
		eeh_edev_info(edev, "not actionable (%d,%d,%d)", !!pdev,
			      !eeh_dev_removed(edev), !eeh_pe_passed(edev->pe));
	}
	device_unlock(&pdev->dev);
	if (edev->pdev != pdev)
		eeh_edev_warn(edev, "Device changed during processing!\n");
	put_device(&pdev->dev);
}

static void eeh_pe_report(const char *name, struct eeh_pe *root,
			  eeh_report_fn fn, enum pci_ers_result *result)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev, *tmp;

	pr_info("EEH: Beginning: '%s'\n", name);
	eeh_for_each_pe(root, pe)
		eeh_pe_for_each_dev(pe, edev, tmp)
			eeh_pe_report_edev(edev, fn, result);
	if (result)
		pr_info("EEH: Finished:'%s' with aggregate recovery state:'%s'\n",
			name, pci_ers_result_name(*result));
	else
		pr_info("EEH: Finished:'%s'\n", name);
}

/**
 * eeh_report_error - Report pci error to each device driver
 * @edev: eeh device
 * @pdev: PCI device
 * @driver: device's PCI driver
 *
 * Report an EEH error to each device driver.
 */
static enum pci_ers_result eeh_report_error(struct eeh_dev *edev,
					    struct pci_dev *pdev,
					    struct pci_driver *driver)
{
	enum pci_ers_result rc;

	if (!driver->err_handler->error_detected)
		return PCI_ERS_RESULT_NONE;

	eeh_edev_info(edev, "Invoking %s->error_detected(IO frozen)",
		      driver->name);
	rc = driver->err_handler->error_detected(pdev, pci_channel_io_frozen);

	edev->in_error = true;
	pci_uevent_ers(pdev, rc);
	return rc;
}

/**
 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
 * @edev: eeh device
 * @pdev: PCI device
 * @driver: device's PCI driver
 *
 * Tells each device driver that IO ports, MMIO and config space I/O
 * are now enabled.
 */
static enum pci_ers_result eeh_report_mmio_enabled(struct eeh_dev *edev,
						   struct pci_dev *pdev,
						   struct pci_driver *driver)
{
	if (!driver->err_handler->mmio_enabled)
		return PCI_ERS_RESULT_NONE;
	eeh_edev_info(edev, "Invoking %s->mmio_enabled()", driver->name);
	return driver->err_handler->mmio_enabled(pdev);
}

/**
 * eeh_report_reset - Tell device that slot has been reset
 * @edev: eeh device
 * @pdev: PCI device
 * @driver: device's PCI driver
 *
 * This routine must be called while EEH tries to reset a particular
 * PCI device so that the associated PCI device driver can take
 * whatever actions it needs, usually saving the data the driver needs
 * so that it can work again once the device is recovered.
 */
static enum pci_ers_result eeh_report_reset(struct eeh_dev *edev,
					    struct pci_dev *pdev,
					    struct pci_driver *driver)
{
	if (!driver->err_handler->slot_reset || !edev->in_error)
		return PCI_ERS_RESULT_NONE;
	eeh_edev_info(edev, "Invoking %s->slot_reset()", driver->name);
	return driver->err_handler->slot_reset(pdev);
}

static void eeh_dev_restore_state(struct eeh_dev *edev, void *userdata)
{
	struct pci_dev *pdev;

	if (!edev)
		return;

	pci_lock_rescan_remove();

	/*
	 * The content of the config space isn't saved for devices whose
	 * config space is blocked, so for those we restore the initial
	 * config space that was saved when the EEH device was created.
	 */
	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
		if (list_is_last(&edev->entry, &edev->pe->edevs))
			eeh_pe_restore_bars(edev->pe);

		pci_unlock_rescan_remove();
		return;
	}

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev) {
		pci_unlock_rescan_remove();
		return;
	}

	pci_restore_state(pdev);

	pci_unlock_rescan_remove();
}

/**
 * eeh_report_resume - Tell device to resume normal operations
 * @edev: eeh device
 * @pdev: PCI device
 * @driver: device's PCI driver
 *
 * This routine must be called to notify the device driver that it
 * can resume, so that the driver can do whatever initialization it
 * needs to make the recovered device work again.
 */
static enum pci_ers_result eeh_report_resume(struct eeh_dev *edev,
					     struct pci_dev *pdev,
					     struct pci_driver *driver)
{
	if (!driver->err_handler->resume || !edev->in_error)
		return PCI_ERS_RESULT_NONE;

	eeh_edev_info(edev, "Invoking %s->resume()", driver->name);
	driver->err_handler->resume(pdev);

	pci_uevent_ers(edev->pdev, PCI_ERS_RESULT_RECOVERED);
#ifdef CONFIG_PCI_IOV
	if (eeh_ops->notify_resume)
		eeh_ops->notify_resume(edev);
#endif
	return PCI_ERS_RESULT_NONE;
}

/**
 * eeh_report_failure - Tell device driver that device is dead.
 * @edev: eeh device
 * @pdev: PCI device
 * @driver: device's PCI driver
 *
 * This informs the device driver that the device is permanently
 * dead, and that no further recovery attempts will be made on it.
 */
static enum pci_ers_result eeh_report_failure(struct eeh_dev *edev,
					      struct pci_dev *pdev,
					      struct pci_driver *driver)
{
	enum pci_ers_result rc;

	if (!driver->err_handler->error_detected)
		return PCI_ERS_RESULT_NONE;

	eeh_edev_info(edev, "Invoking %s->error_detected(permanent failure)",
		      driver->name);
	rc = driver->err_handler->error_detected(pdev,
						 pci_channel_io_perm_failure);

	pci_uevent_ers(pdev, PCI_ERS_RESULT_DISCONNECT);
	return rc;
}

static void *eeh_add_virt_device(struct eeh_dev *edev)
{
	struct pci_driver *driver;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);

	if (!(edev->physfn)) {
		eeh_edev_warn(edev, "Not for VF\n");
		return NULL;
	}

	driver = eeh_pcid_get(dev);
	if (driver) {
		if (driver->err_handler) {
			eeh_pcid_put(dev);
			return NULL;
		}
		eeh_pcid_put(dev);
	}

#ifdef CONFIG_PCI_IOV
	pci_iov_add_virtfn(edev->physfn, edev->vf_index);
#endif
	return NULL;
}

static void eeh_rmv_device(struct eeh_dev *edev, void *userdata)
{
	struct pci_driver *driver;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct eeh_rmv_data *rmv_data = (struct eeh_rmv_data *)userdata;

	/*
	 * Ideally we would remove the PCI bridges as well, but that adds a
	 * lot of complexity, particularly because some of the devices under
	 * a bridge might themselves support EEH. So, for simplicity, we
	 * only deal with ordinary PCI devices here.
	 */
	if (!eeh_edev_actionable(edev) ||
	    (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE))
		return;

	if (rmv_data) {
		driver = eeh_pcid_get(dev);
		if (driver) {
			if (driver->err_handler &&
			    driver->err_handler->error_detected &&
			    driver->err_handler->slot_reset) {
				eeh_pcid_put(dev);
				return;
			}
			eeh_pcid_put(dev);
		}
	}

	/* Remove it from PCI subsystem */
	pr_info("EEH: Removing %s without EEH sensitive driver\n",
		pci_name(dev));
	edev->mode |= EEH_DEV_DISCONNECTED;
	if (rmv_data)
		rmv_data->removed_dev_count++;

	if (edev->physfn) {
#ifdef CONFIG_PCI_IOV
		pci_iov_remove_virtfn(edev->physfn, edev->vf_index);
		edev->pdev = NULL;
#endif
		if (rmv_data)
			list_add(&edev->rmv_entry, &rmv_data->removed_vf_list);
	} else {
		pci_lock_rescan_remove();
		pci_stop_and_remove_bus_device(dev);
		pci_unlock_rescan_remove();
	}
}

static void *eeh_pe_detach_dev(struct eeh_pe *pe, void *userdata)
{
	struct eeh_dev *edev, *tmp;

	eeh_pe_for_each_dev(pe, edev, tmp) {
		if (!(edev->mode & EEH_DEV_DISCONNECTED))
			continue;

		edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
		eeh_pe_tree_remove(edev);
	}

	return NULL;
}

/*
 * Explicitly clear the PE's frozen state on PowerNV, where the PE is
 * kept frozen until the BAR restore has completed. It's harmless to
 * clear it on pSeries as well. To be consistent with the PE reset
 * (which is attempted up to 3 times), we also try to clear the frozen
 * state up to 3 times.
 */
static int eeh_clear_pe_frozen_state(struct eeh_pe *root, bool include_passed)
{
	struct eeh_pe *pe;
	int i;

	eeh_for_each_pe(root, pe) {
		if (include_passed || !eeh_pe_passed(pe)) {
			for (i = 0; i < 3; i++)
				if (!eeh_unfreeze_pe(pe))
					break;
			if (i >= 3)
				return -EIO;
		}
	}
	eeh_pe_state_clear(root, EEH_PE_ISOLATED, include_passed);
	return 0;
}

int eeh_pe_reset_and_recover(struct eeh_pe *pe)
{
	int ret;

	/* Bail if the PE is being recovered */
	if (pe->state & EEH_PE_RECOVERING)
		return 0;

	/* Put the PE into recovery mode */
	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);

	/* Save states */
	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);

	/* Issue reset */
	ret = eeh_pe_reset_full(pe, true);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
		return ret;
	}

	/* Unfreeze the PE */
	ret = eeh_clear_pe_frozen_state(pe, true);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
		return ret;
	}

	/* Restore device state */
	eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);

	/* Clear recovery mode */
	eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);

	return 0;
}

/**
 * eeh_reset_device - Perform actual reset of a pci slot
 * @pe: EEH PE
 * @bus: PCI bus corresponding to the isolated slot
 * @rmv_data: optional; list to record removed devices
 * @driver_eeh_aware: does the device's driver provide EEH support?
 *
 * This routine must be called to do reset on the indicated PE.
 * During the reset, udev might be invoked because the affected
 * PCI devices will be removed and then re-added.
 */
static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
			    struct eeh_rmv_data *rmv_data,
			    bool driver_eeh_aware)
{
	time64_t tstamp;
	int cnt, rc;
	struct eeh_dev *edev;
	struct eeh_pe *tmp_pe;
	bool any_passed = false;

	eeh_for_each_pe(pe, tmp_pe)
		any_passed |= eeh_pe_passed(tmp_pe);

	/* pcibios will clear the counter; save the value */
	cnt = pe->freeze_count;
	tstamp = pe->tstamp;

	/*
	 * We don't remove the corresponding PE instances because
	 * we need the information afterwards. The attached EEH
	 * devices are expected to be attached soon when calling
	 * into pci_hp_add_devices().
	 */
	eeh_pe_state_mark(pe, EEH_PE_KEEP);
	if (any_passed || driver_eeh_aware || (pe->type & EEH_PE_VF)) {
		eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
	} else {
		pci_hp_remove_devices(bus);
	}

	/*
	 * Reset the pci controller. (Asserts RST#; resets config space).
	 * Reconfigure bridges and devices. Don't try to bring the system
	 * up if the reset failed for some reason.
	 *
	 * During the reset, it's very dangerous to have uncontrolled PCI
	 * config accesses, so we prefer to block them. However, controlled
	 * PCI config accesses initiated from EEH itself are allowed.
	 */
	rc = eeh_pe_reset_full(pe, false);
	if (rc)
		return rc;

	/* Restore PE */
	eeh_ops->configure_bridge(pe);
	eeh_pe_restore_bars(pe);

	/* Clear frozen state */
	rc = eeh_clear_pe_frozen_state(pe, false);
	if (rc)
		return rc;

	/* Give the system 5 seconds to finish running the user-space
	 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
	 * this is a hack, but if we don't do this, and try to bring
	 * the device up before the scripts have taken it down,
	 * potentially weird things happen.
	 */
	if (!driver_eeh_aware || rmv_data->removed_dev_count) {
		pr_info("EEH: Sleep 5s ahead of %s hotplug\n",
			(driver_eeh_aware ? "partial" : "complete"));
		ssleep(5);

		/*
		 * The EEH device is still connected with its parent
		 * PE. We should disconnect it so the binding can be
		 * rebuilt when adding PCI devices.
		 */
		edev = list_first_entry(&pe->edevs, struct eeh_dev, entry);
		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		if (pe->type & EEH_PE_VF) {
			eeh_add_virt_device(edev);
		} else {
			if (!driver_eeh_aware)
				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
			pci_hp_add_devices(bus);
		}
	}
	eeh_pe_state_clear(pe, EEH_PE_KEEP, true);

	pe->tstamp = tstamp;
	pe->freeze_count = cnt;

	return 0;
}

/* The longest amount of time to wait for a pci device
 * to come back on line, in seconds.
 */
#define MAX_WAIT_FOR_RECOVERY 300

/* Walks the PE tree after processing an event to remove any stale PEs.
 *
 * NB: This needs to be recursive to ensure the leaf PEs get removed
 * before their parents do. Although this could be done iteratively,
 * the recursive form is easier to read and guarantees that the leaf
 * nodes are handled first.
 */
static void eeh_pe_cleanup(struct eeh_pe *pe)
{
	struct eeh_pe *child_pe, *tmp;

	list_for_each_entry_safe(child_pe, tmp, &pe->child_list, child)
		eeh_pe_cleanup(child_pe);

	if (pe->state & EEH_PE_KEEP)
		return;

	if (!(pe->state & EEH_PE_INVALID))
		return;

	if (list_empty(&pe->edevs) && list_empty(&pe->child_list)) {
		list_del(&pe->child);
		kfree(pe);
	}
}

/**
 * eeh_slot_presence_check - Check if a device is still present in a slot
 * @pdev: pci_dev to check
 *
 * This function may return a false positive if we can't determine the slot's
 * presence state. This might happen for PCIe slots if the PE containing
 * the upstream bridge is also frozen, or the bridge is part of the same PE
 * as the device.
 *
 * This shouldn't happen often, but you might see it if you hotplug a PCIe
 * switch.
 */
static bool eeh_slot_presence_check(struct pci_dev *pdev)
{
	const struct hotplug_slot_ops *ops;
	struct pci_slot *slot;
	u8 state;
	int rc;

	if (!pdev)
		return false;

	if (pdev->error_state == pci_channel_io_perm_failure)
		return false;

	slot = pdev->slot;
	if (!slot || !slot->hotplug)
		return true;

	ops = slot->hotplug->ops;
	if (!ops || !ops->get_adapter_status)
		return true;

	/* set the attention indicator while we've got the slot ops */
	if (ops->set_attention_status)
		ops->set_attention_status(slot->hotplug, 1);

	rc = ops->get_adapter_status(slot->hotplug, &state);
	if (rc)
		return true;

	return !!state;
}

static void eeh_clear_slot_attention(struct pci_dev *pdev)
{
	const struct hotplug_slot_ops *ops;
	struct pci_slot *slot;

	if (!pdev)
		return;

	if (pdev->error_state == pci_channel_io_perm_failure)
		return;

	slot = pdev->slot;
	if (!slot || !slot->hotplug)
		return;

	ops = slot->hotplug->ops;
	if (!ops || !ops->set_attention_status)
		return;

	ops->set_attention_status(slot->hotplug, 0);
}
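
/*
 * Overview of the recovery flow driven by eeh_handle_normal_event()
 * below, expressed in terms of the pci_error_handlers callbacks it
 * invokes (summary only; the function itself is authoritative):
 *
 *	error_detected(IO frozen)
 *	  -> [optionally] mmio_enabled()
 *	  -> [optionally] slot reset, then slot_reset()
 *	  -> resume()
 *
 * If recovery fails, error_detected(permanent failure) is reported and
 * the affected devices are removed.
 */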

/**
 * eeh_handle_normal_event - Handle EEH events on a specific PE
 * @pe: EEH PE - which should not be used after we return, as it may
 * have been invalidated.
 *
 * Attempts to recover the given PE. If recovery fails or the PE has failed
 * too many times, remove the PE.
 *
 * When the PHB detects address or data parity errors on a particular PCI
 * slot, the associated PE is frozen. In addition, DMAs to wild addresses
 * (which usually happen due to bugs in device drivers or in PCI adapter
 * firmware) can cause EEH errors; #SERR, #PERR and other miscellaneous
 * PCI-related errors can also trigger them.
 *
 * The recovery process consists of unplugging the device driver (which
 * generates hotplug events to userspace), then issuing a PCI #RST to
 * the device, then reconfiguring the PCI config space for all bridges
 * & devices under this slot, and then finally restarting the device
 * drivers (which causes a second set of hotplug events to go out to
 * userspace).
 */
void eeh_handle_normal_event(struct eeh_pe *pe)
{
	struct pci_bus *bus;
	struct eeh_dev *edev, *tmp;
	struct eeh_pe *tmp_pe;
	int rc = 0;
	enum pci_ers_result result = PCI_ERS_RESULT_NONE;
	struct eeh_rmv_data rmv_data =
		{LIST_HEAD_INIT(rmv_data.removed_vf_list), 0};
	int devices = 0;

	pci_lock_rescan_remove();

	bus = eeh_pe_bus_get_nolock(pe);
	if (!bus) {
		pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
		       __func__, pe->phb->global_number, pe->addr);
		pci_unlock_rescan_remove();
		return;
	}

	/*
	 * When devices are hot-removed we might get an EEH due to
	 * a driver attempting to touch the MMIO space of a removed
	 * device. In this case we don't have a device to recover
	 * so suppress the event if we can't find any present devices.
	 *
	 * The hotplug driver should take care of tearing down the
	 * device itself.
	 */
	eeh_for_each_pe(pe, tmp_pe)
		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
			if (eeh_slot_presence_check(edev->pdev))
				devices++;

	if (!devices) {
		pr_warn("EEH: Frozen PHB#%x-PE#%x is empty!\n",
			pe->phb->global_number, pe->addr);
		/*
		 * The device has been removed, so tear down its state. On
		 * powernv the hotplug driver would take care of this, but
		 * not on pseries, so permanently disable the card as it has
		 * been hot removed.
		 *
		 * On powernv, note that the removal of the device is covered
		 * by the PCI rescan lock, so there is no problem even if the
		 * hotplug driver attempts to remove the device concurrently.
		 */
		goto recover_failed;
	}

	/* Log the event */
	if (pe->type & EEH_PE_PHB) {
		pr_err("EEH: Recovering PHB#%x, location: %s\n",
		       pe->phb->global_number, eeh_pe_loc_get_bus(bus));
	} else {
		struct eeh_pe *phb_pe = eeh_phb_pe_get(pe->phb);

		pr_err("EEH: Recovering PHB#%x-PE#%x\n",
		       pe->phb->global_number, pe->addr);
		pr_err("EEH: PE location: %s, PHB location: %s\n",
		       eeh_pe_loc_get_bus(bus),
		       eeh_pe_loc_get_bus(eeh_pe_bus_get_nolock(phb_pe)));
	}

#ifdef CONFIG_STACKTRACE
	/*
	 * Print the saved stack trace now that we've verified there's
	 * something to recover.
	 */
	if (pe->trace_entries) {
		void **ptrs = (void **) pe->stack_trace;
		int i;

		pr_err("EEH: Frozen PHB#%x-PE#%x detected\n",
		       pe->phb->global_number, pe->addr);

		/* FIXME: Use the same format as dump_stack() */
		pr_err("EEH: Call Trace:\n");
		for (i = 0; i < pe->trace_entries; i++)
			pr_err("EEH: [%p] %pS\n", ptrs[i], ptrs[i]);

		pe->trace_entries = 0;
	}
#endif /* CONFIG_STACKTRACE */

	eeh_for_each_pe(pe, tmp_pe)
		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
			edev->mode &= ~EEH_DEV_NO_HANDLER;

	eeh_pe_update_time_stamp(pe);
	pe->freeze_count++;
	if (pe->freeze_count > eeh_max_freezes) {
		pr_err("EEH: PHB#%x-PE#%x has failed %d times in the last hour and has been permanently disabled.\n",
		       pe->phb->global_number, pe->addr,
		       pe->freeze_count);

		goto recover_failed;
	}

	/* Walk the various device drivers attached to this slot through
	 * a reset sequence, giving each an opportunity to do what it needs
	 * to accomplish the reset. Each child gets a report of the
	 * status ... if any child can't handle the reset, then the entire
	 * slot is dlpar removed and added.
	 *
	 * When the PHB is fenced, we have to issue a reset to recover from
	 * the error. Override the result if necessary to allow partial
	 * hotplug in this case.
	 */
	pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
		pe->freeze_count, eeh_max_freezes);
	pr_info("EEH: Notify device drivers to shutdown\n");
	eeh_set_channel_state(pe, pci_channel_io_frozen);
	eeh_set_irq_state(pe, false);
	eeh_pe_report("error_detected(IO frozen)", pe,
		      eeh_report_error, &result);
	if (result == PCI_ERS_RESULT_DISCONNECT)
		goto recover_failed;

	/*
	 * Errors logged on a PHB are always fences, which need a full
	 * PHB reset to clear, so force that to happen.
	 */
	if ((pe->type & EEH_PE_PHB) && result != PCI_ERS_RESULT_NONE)
		result = PCI_ERS_RESULT_NEED_RESET;

	/* Get the current PCI slot state. This can take a long time,
	 * sometimes over 300 seconds for certain systems.
	 */
	rc = eeh_wait_state(pe, MAX_WAIT_FOR_RECOVERY * 1000);
	if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
		pr_warn("EEH: Permanent failure\n");
		goto recover_failed;
	}

	/* Since rtas may enable MMIO when posting the error log,
	 * don't post the error log until after all dev drivers
	 * have been informed.
	 */
	pr_info("EEH: Collect temporary log\n");
	eeh_slot_error_detail(pe, EEH_LOG_TEMP);

	/* If all device drivers were EEH-unaware, then shut
	 * down all of the device drivers, and hope they
	 * go down willingly, without panicking the system.
	 */
	if (result == PCI_ERS_RESULT_NONE) {
		pr_info("EEH: Reset with hotplug activity\n");
		rc = eeh_reset_device(pe, bus, NULL, false);
		if (rc) {
			pr_warn("%s: Unable to reset, err=%d\n", __func__, rc);
			goto recover_failed;
		}
	}

	/* If all devices reported they can proceed, then re-enable MMIO */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enable I/O for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
		if (rc < 0)
			goto recover_failed;

		if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			pr_info("EEH: Notify device drivers to resume I/O\n");
			eeh_pe_report("mmio_enabled", pe,
				      eeh_report_mmio_enabled, &result);
		}
	}
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enable DMA for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
		if (rc < 0)
			goto recover_failed;

		if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			/*
			 * We didn't do a PE reset in this case, so the PE
			 * is still in the frozen state. Clear that before
			 * resuming the PE.
			 */
			eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
			result = PCI_ERS_RESULT_RECOVERED;
		}
	}

	/* If any device called out for a reset, then reset the slot */
	if (result == PCI_ERS_RESULT_NEED_RESET) {
		pr_info("EEH: Reset without hotplug activity\n");
		rc = eeh_reset_device(pe, bus, &rmv_data, true);
		if (rc) {
			pr_warn("%s: Cannot reset, err=%d\n", __func__, rc);
			goto recover_failed;
		}

		result = PCI_ERS_RESULT_NONE;
		eeh_set_channel_state(pe, pci_channel_io_normal);
		eeh_set_irq_state(pe, true);
		eeh_pe_report("slot_reset", pe, eeh_report_reset,
			      &result);
	}

	if ((result == PCI_ERS_RESULT_RECOVERED) ||
	    (result == PCI_ERS_RESULT_NONE)) {
		/*
		 * Hot-removed VFs should be added back after the PF has
		 * been recovered properly.
		 */
		list_for_each_entry_safe(edev, tmp, &rmv_data.removed_vf_list,
					 rmv_entry) {
			eeh_add_virt_device(edev);
			list_del(&edev->rmv_entry);
		}

		/* Tell all device drivers that they can resume operations */
		pr_info("EEH: Notify device driver to resume\n");
		eeh_set_channel_state(pe, pci_channel_io_normal);
		eeh_set_irq_state(pe, true);
		eeh_pe_report("resume", pe, eeh_report_resume, NULL);
		eeh_for_each_pe(pe, tmp_pe) {
			eeh_pe_for_each_dev(tmp_pe, edev, tmp) {
				edev->mode &= ~EEH_DEV_NO_HANDLER;
				edev->in_error = false;
			}
		}

		pr_info("EEH: Recovery successful.\n");
		goto out;
	}

recover_failed:
	/*
	 * About 90% of all real-life EEH failures in the field
	 * are due to poorly seated PCI cards. Only 10% or so are
	 * due to actual, failed cards.
	 */
	pr_err("EEH: Unable to recover from failure of PHB#%x-PE#%x.\n"
	       "Please try reseating or replacing it\n",
	       pe->phb->global_number, pe->addr);

	eeh_slot_error_detail(pe, EEH_LOG_PERM);

	/* Notify all devices that they're about to go down. */
	eeh_set_irq_state(pe, false);
	eeh_pe_report("error_detected(permanent failure)", pe,
		      eeh_report_failure, NULL);
	eeh_set_channel_state(pe, pci_channel_io_perm_failure);

	/* Mark the PE to be removed permanently */
	eeh_pe_state_mark(pe, EEH_PE_REMOVED);

	/*
	 * Shut down the device drivers for good. We mark
	 * all removed devices correctly so that their PCI
	 * config space is not accessed any more.
	 */
	if (pe->type & EEH_PE_VF) {
		eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
		eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
	} else {
		eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
		eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);

		bus = eeh_pe_bus_get_nolock(pe);
		if (bus)
			pci_hp_remove_devices(bus);
		else
			pr_err("%s: PCI bus for PHB#%x-PE#%x disappeared\n",
			       __func__, pe->phb->global_number, pe->addr);

		/* The passed PE should no longer be used */
		pci_unlock_rescan_remove();
		return;
	}

out:
	/*
	 * Clean up any PEs without devices. While marked as EEH_PE_RECOVERING
	 * we don't want to modify the PE tree structure, so we do it here.
	 */
	eeh_pe_cleanup(pe);

	/* clear the slot attention LED for all recovered devices */
	eeh_for_each_pe(pe, tmp_pe)
		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
			eeh_clear_slot_attention(edev->pdev);

	eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);

	pci_unlock_rescan_remove();
}

/**
 * eeh_handle_special_event - Handle EEH events without a specific failing PE
 *
 * Called when an EEH event is detected but can't be narrowed down to a
 * specific PE. Iterates through possible failures and handles them as
 * necessary.
 */
void eeh_handle_special_event(void)
{
	struct eeh_pe *pe, *phb_pe, *tmp_pe;
	struct eeh_dev *edev, *tmp_edev;
	struct pci_bus *bus;
	struct pci_controller *hose;
	unsigned long flags;
	int rc;

	pci_lock_rescan_remove();

	do {
		rc = eeh_ops->next_error(&pe);

		switch (rc) {
		case EEH_NEXT_ERR_DEAD_IOC:
			/* Mark all PHBs in dead state */
			eeh_serialize_lock(&flags);

			/* Purge all events */
			eeh_remove_event(NULL, true);

			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe)
					continue;

				eeh_pe_mark_isolated(phb_pe);
			}

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_FROZEN_PE:
		case EEH_NEXT_ERR_FENCED_PHB:
		case EEH_NEXT_ERR_DEAD_PHB:
			/* Mark the PE in fenced state */
			eeh_serialize_lock(&flags);

			/* Purge all events of the PHB */
			eeh_remove_event(pe, true);

			if (rc != EEH_NEXT_ERR_DEAD_PHB)
				eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
			eeh_pe_mark_isolated(pe);

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_NONE:
			pci_unlock_rescan_remove();
			return;
		default:
			pr_warn("%s: Invalid value %d from next_error()\n",
				__func__, rc);
			pci_unlock_rescan_remove();
			return;
		}

		/*
		 * A fenced PHB or a frozen PE is handled as a normal
		 * event. For a dead PHB or a dead IOC we have to remove
		 * the affected PHBs instead.
		 */
		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
		    rc == EEH_NEXT_ERR_FENCED_PHB) {
			eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
			pci_unlock_rescan_remove();
			eeh_handle_normal_event(pe);
			pci_lock_rescan_remove();
		} else {
			eeh_for_each_pe(pe, tmp_pe)
				eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
					edev->mode &= ~EEH_DEV_NO_HANDLER;

			/* Notify all devices to be down */
			eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
			eeh_pe_report(
				"error_detected(permanent failure)", pe,
				eeh_report_failure, NULL);
			eeh_set_channel_state(pe, pci_channel_io_perm_failure);

			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe ||
				    !(phb_pe->state & EEH_PE_ISOLATED) ||
				    (phb_pe->state & EEH_PE_RECOVERING))
					continue;

				bus = eeh_pe_bus_get_nolock(phb_pe);
				if (!bus) {
					pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
					       __func__,
					       pe->phb->global_number,
					       pe->addr);
					break;
				}
				pci_hp_remove_devices(bus);
			}
		}

		/*
		 * If we have detected a dead IOC, there is no need to
		 * proceed any further since all PHBs will have been removed.
		 */
		if (rc == EEH_NEXT_ERR_DEAD_IOC)
			break;
	} while (rc != EEH_NEXT_ERR_NONE);

	pci_unlock_rescan_remove();
}