// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (c) 2005 Linas Vepstas <linas@linas.org>
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>

/** Overview:
 *  EEH error states may be detected within exception handlers;
 *  however, the recovery processing needs to occur asynchronously
 *  in a normal kernel context and not an interrupt context.
 *  This pair of routines creates an event and queues it onto a
 *  list, from which the eehd kernel thread drives recovery.
 */
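
/*
 * Illustrative flow (a sketch, not code from this file): a platform
 * backend that detects a frozen PE, possibly inside its interrupt
 * handler, reports it with something like
 *
 *	eeh_send_failure_event(pe);
 *
 * which allocates and queues an eeh_event below; the eehd kernel
 * thread then dequeues it and drives recovery in normal process
 * context. The surrounding detection code is hypothetical; only the
 * call itself is defined in this file.
 */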

static DEFINE_SPINLOCK(eeh_eventlist_lock);
static DECLARE_COMPLETION(eeh_eventlist_event);
static LIST_HEAD(eeh_eventlist);

/**
 * eeh_event_handler - Dispatch EEH events.
 * @dummy: unused
 *
 * The detection of a frozen slot can occur inside an interrupt,
 * where it can be hard to do anything about it.  The goal of this
 * routine is to pull these detection events out of the context
 * of the interrupt handler, and re-dispatch them for processing
 * at a later time in a normal context.
 */
static int eeh_event_handler(void *dummy)
{
	unsigned long flags;
	struct eeh_event *event;

	while (!kthread_should_stop()) {
		if (wait_for_completion_interruptible(&eeh_eventlist_event))
			break;

		/* Fetch EEH event from the queue */
		spin_lock_irqsave(&eeh_eventlist_lock, flags);
		event = NULL;
		if (!list_empty(&eeh_eventlist)) {
			event = list_entry(eeh_eventlist.next,
					   struct eeh_event, list);
			list_del(&event->list);
		}
		spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
		if (!event)
			continue;

		/* We might have an event with no PE bound to it */
		if (event->pe)
			eeh_handle_normal_event(event->pe);
		else
			eeh_handle_special_event();

		kfree(event);
	}

	return 0;
}
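
/*
 * Note on the loop above: each event is detached from the list under
 * eeh_eventlist_lock and then processed with the lock dropped, so new
 * events can still be queued (including from interrupt context) while
 * recovery runs. The thread appears as "eehd" in the process list,
 * matching the name passed to kthread_run() below.
 */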

/**
 * eeh_event_init - Start kernel thread to handle EEH events
 *
 * This routine is called to start the kernel thread ("eehd") that
 * processes EEH events.
 */
int eeh_event_init(void)
{
	struct task_struct *t;
	int ret = 0;

	t = kthread_run(eeh_event_handler, NULL, "eehd");
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		pr_err("%s: Failed to start EEH daemon (%d)\n",
			__func__, ret);
		return ret;
	}

	return 0;
}
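
/*
 * A minimal sketch of the expected bring-up order, assuming an EEH
 * core init path (the function name below is hypothetical; in current
 * trees the real caller lives in eeh.c):
 *
 *	static int __init example_eeh_bringup(void)
 *	{
 *		int ret = eeh_event_init();
 *
 *		if (ret)
 *			return ret;
 *
 *		... remaining EEH initialization ...
 *		return 0;
 *	}
 *
 * Starting eehd first ensures no event is queued before a consumer
 * exists to service it.
 */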

/**
 * __eeh_send_failure_event - Generate a PCI error event
 * @pe: EEH PE
 *
 * This routine can be called within an interrupt context;
 * the actual event will be delivered in a normal context
 * (from the eehd kernel thread).
 */
int __eeh_send_failure_event(struct eeh_pe *pe)
{
	unsigned long flags;
	struct eeh_event *event;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event) {
		pr_err("EEH: out of memory, event not handled\n");
		return -ENOMEM;
	}
	event->pe = pe;

	/*
	 * Mark the PE as recovering before inserting it in the queue.
	 * This prevents the PE from being free()ed by a hotplug driver
	 * while the PE is sitting in the event queue.
	 */
	if (pe) {
		/*
		 * Save the current stack trace so we can dump it from the
		 * event handler thread.
		 */
		pe->trace_entries = stack_trace_save(pe->stack_trace,
					 ARRAY_SIZE(pe->stack_trace), 0);

		eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
	}

	/* We may or may not be called in an interrupt context */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_add(&event->list, &eeh_eventlist);
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);

	/* Wake the EEH daemon so it can kick in */
	complete(&eeh_eventlist_event);

	return 0;
}
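
/*
 * Hedged note: __eeh_send_failure_event() is the entry point that
 * bypasses the no_recover check in eeh_send_failure_event() below.
 * A caller that must queue an event even while recovery is manually
 * suppressed (say, an error injected deliberately while testing the
 * recovery code; that use case is an assumption here) would call
 *
 *	ret = __eeh_send_failure_event(pe);
 *
 * directly. The only behavioural difference between the two entry
 * points is the check visible below.
 */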

int eeh_send_failure_event(struct eeh_pe *pe)
{
	/*
	 * If we've manually suppressed recovery events via debugfs
	 * then just drop it on the floor.
	 */
	if (eeh_debugfs_no_recover) {
		pr_err("EEH: Event dropped due to no_recover setting\n");
		return 0;
	}

	return __eeh_send_failure_event(pe);
}
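
/*
 * For reference, eeh_debugfs_no_recover is a boolean toggled via
 * debugfs; in current trees the knob is created in eeh.c (for example
 * as /sys/kernel/debug/powerpc/eeh_disable_recovery, though the exact
 * path is that file's concern, not this one's). With it set, incoming
 * errors are logged and dropped instead of recovered, which is mainly
 * useful when debugging the EEH code itself.
 */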

/**
 * eeh_remove_event - Remove EEH event from the queue
 * @pe: PE whose events should be removed; NULL matches every event
 * @force: remove events even when their PE is already isolated
 *
 * On the PowerNV platform, a subsequent event may really be part of
 * an earlier one. In that case the later events are pure duplicates
 * and unnecessary, so they should be removed.
 */
void eeh_remove_event(struct eeh_pe *pe, bool force)
{
	unsigned long flags;
	struct eeh_event *event, *tmp;

	/*
	 * If a NULL PE is passed in, either the IOC is dead or the
	 * caller is sure it can report all existing errors itself,
	 * so every queued event is removed.
	 *
	 * Without "force", an event whose PE has already been
	 * isolated is kept on the queue, to avoid losing that
	 * event.
	 */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) {
		if (!force && event->pe &&
		    (event->pe->state & EEH_PE_ISOLATED))
			continue;

		if (!pe) {
			list_del(&event->list);
			kfree(event);
		} else if (pe->type & EEH_PE_PHB) {
			if (event->pe && event->pe->phb == pe->phb) {
				list_del(&event->list);
				kfree(event);
			}
		} else if (event->pe == pe) {
			list_del(&event->list);
			kfree(event);
		}
	}
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
}
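
/*
 * Hedged usage sketch: the recovery core (eeh_driver.c in current
 * trees) prunes stale events once it has started handling a PE, along
 * the lines of
 *
 *	eeh_remove_event(pe, true);
 *
 * so duplicate reports of an error already being recovered do not
 * trigger a second pass. The exact call sites and flag values are that
 * file's concern; this only illustrates the matching rules above (a
 * NULL PE matches every event, a PHB PE matches all events on the same
 * PHB, and any other PE must match exactly).
 */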
200