// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (c) 2005 Linas Vepstas <linas@linas.org>
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>

/** Overview:
 *  EEH error states may be detected within exception handlers;
 *  however, the recovery processing needs to occur asynchronously
 *  in a normal kernel context and not an interrupt context.
 *  This pair of routines creates an event and queues it onto a
 *  pending-event list, where the eehd kernel thread picks it up
 *  and drives recovery.
 */
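/*
 * Illustrative sketch (not part of the original file): the producer side
 * runs in whatever context detected the error and only allocates and
 * queues an event; the consumer side is the eehd thread below.  The
 * function name example_detect_frozen_pe() is hypothetical.
 *
 *	// detection path, possibly in interrupt context
 *	static void example_detect_frozen_pe(struct eeh_pe *pe)
 *	{
 *		eeh_send_failure_event(pe);	// queue the event, wake eehd
 *	}
 *
 *	// eehd (eeh_event_handler below) then dequeues the event and calls
 *	// eeh_handle_normal_event() or eeh_handle_special_event() in
 *	// normal process context.
 */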
static DEFINE_SPINLOCK(eeh_eventlist_lock);
static DECLARE_COMPLETION(eeh_eventlist_event);
static LIST_HEAD(eeh_eventlist);

/**
 * eeh_event_handler - Dispatch EEH events.
 * @dummy: unused
 *
 * The detection of a frozen slot can occur inside an interrupt,
 * where it can be hard to do anything about it.  The goal of this
 * routine is to pull these detection events out of the context
 * of the interrupt handler, and re-dispatch them for processing
 * at a later time in a normal context.
 */
static int eeh_event_handler(void *dummy)
{
	unsigned long flags;
	struct eeh_event *event;
	struct eeh_pe *pe;

	while (!kthread_should_stop()) {
		if (wait_for_completion_interruptible(&eeh_eventlist_event))
			break;

		/* Fetch EEH event from the queue */
		spin_lock_irqsave(&eeh_eventlist_lock, flags);
		event = NULL;
		if (!list_empty(&eeh_eventlist)) {
			event = list_entry(eeh_eventlist.next,
					   struct eeh_event, list);
			list_del(&event->list);
		}
		spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
		if (!event)
			continue;

		/* We might have an event without a bound PE */
		pe = event->pe;
		if (pe) {
			if (pe->type & EEH_PE_PHB)
				pr_info("EEH: Detected error on PHB#%x\n",
					pe->phb->global_number);
			else
				pr_info("EEH: Detected PCI bus error on "
					"PHB#%x-PE#%x\n",
					pe->phb->global_number, pe->addr);
			eeh_handle_normal_event(pe);
		} else {
			eeh_handle_special_event();
		}

		kfree(event);
	}

	return 0;
}

/**
 * eeh_event_init - Start kernel thread to handle EEH events
 *
 * This routine is called to start the kernel thread that processes
 * EEH events.
 */
int eeh_event_init(void)
{
	struct task_struct *t;
	int ret = 0;

	t = kthread_run(eeh_event_handler, NULL, "eehd");
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		pr_err("%s: Failed to start EEH daemon (%d)\n",
		       __func__, ret);
		return ret;
	}

	return 0;
}
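/*
 * Illustrative note (not part of the original file): eeh_event_init() is
 * expected to be called once from the EEH core's own initialisation,
 * before any events can be queued.  A minimal sketch, with the caller
 * assumed rather than taken from this file:
 *
 *	// during EEH core setup, e.g. from eeh_init()
 *	ret = eeh_event_init();
 *	if (ret)
 *		return ret;	// no eehd thread, recovery unavailable
 */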
/**
 * eeh_send_failure_event - Generate a PCI error event
 * @pe: EEH PE
 *
 * This routine can be called within an interrupt context;
 * the actual event will be delivered in a normal context
 * (by the eehd kernel thread).
 */
int __eeh_send_failure_event(struct eeh_pe *pe)
{
	unsigned long flags;
	struct eeh_event *event;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event) {
		pr_err("EEH: out of memory, event not handled\n");
		return -ENOMEM;
	}
	event->pe = pe;

	/* We may or may not be called in an interrupt context */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_add(&event->list, &eeh_eventlist);
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);

	/* For the EEH daemon to kick in */
	complete(&eeh_eventlist_event);

	return 0;
}

int eeh_send_failure_event(struct eeh_pe *pe)
{
	/*
	 * If we've manually suppressed recovery events via debugfs
	 * then just drop it on the floor.
	 */
	if (eeh_debugfs_no_recover) {
		pr_err("EEH: Event dropped due to no_recover setting\n");
		return 0;
	}

	return __eeh_send_failure_event(pe);
}
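/*
 * Illustrative sketch (not part of the original file): callers that only
 * know "something is wrong" but cannot attribute the error to a specific
 * PE may queue an event with a NULL PE; eeh_event_handler() then routes
 * it to eeh_handle_special_event().
 *
 *	// attributable error: recover just this PE
 *	eeh_send_failure_event(pe);
 *
 *	// unattributable error (e.g. dead IOC): let the special-event
 *	// path work out what actually failed
 *	eeh_send_failure_event(NULL);
 */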
/**
 * eeh_remove_event - Remove EEH event from the queue
 * @pe: Event binding to the PE
 * @force: Event will be removed unconditionally
 *
 * On the PowerNV platform, subsequent events may arrive that are
 * really part of an earlier one.  Such events are duplicates and
 * unnecessary, so they should be removed from the queue.
 */
void eeh_remove_event(struct eeh_pe *pe, bool force)
{
	unsigned long flags;
	struct eeh_event *event, *tmp;

	/*
	 * If a NULL PE is passed in, either the IOC is dead or the
	 * caller is sure it can report all existing errors itself.
	 *
	 * Unless "force" is set, events whose PE has already been
	 * isolated are left on the queue so that they are not lost.
	 */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) {
		if (!force && event->pe &&
		    (event->pe->state & EEH_PE_ISOLATED))
			continue;

		if (!pe) {
			list_del(&event->list);
			kfree(event);
		} else if (pe->type & EEH_PE_PHB) {
			if (event->pe && event->pe->phb == pe->phb) {
				list_del(&event->list);
				kfree(event);
			}
		} else if (event->pe == pe) {
			list_del(&event->list);
			kfree(event);
		}
	}
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
}
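/*
 * Illustrative sketch (not part of the original file): the three ways the
 * function above matches queued events, depending on the @pe argument.
 *
 *	eeh_remove_event(NULL, true);		// drop every queued event
 *	eeh_remove_event(phb_pe, false);	// drop events on that PHB,
 *						// keeping isolated PEs
 *	eeh_remove_event(pe, false);		// drop events for one PE
 */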