xref: /linux/arch/powerpc/platforms/pseries/ras.c (revision f1b0c8d3d3b5ff9c0b14bb2383a4bc38d8922bd1)
/*
 * Copyright (C) 2001 Dave Engebretsen IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/* Change Activity:
 * 2001/09/21 : engebret : Created with minimal EPOW and HW exception support.
 * End Change Activity
 */

#include <linux/errno.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/random.h>
#include <linux/sysrq.h>
#include <linux/bitops.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/udbg.h>
#include <asm/firmware.h>

#include "pseries.h"

static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(ras_log_buf_lock);

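/*
 * Scratch buffer into which fwnmi_get_errinfo() copies the RTAS error log
 * saved by firmware for a machine check, so that the firmware save area can
 * be released again (via ibm,nmi-interlock) before the log is processed.
 * See the comment above fwnmi_get_errinfo() for the locking caveats.
 */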
static char mce_data_buf[RTAS_ERROR_LOG_MAX];

static int ras_get_sensor_state_token;
static int ras_check_exception_token;

#define EPOW_SENSOR_TOKEN	9
#define EPOW_SENSOR_INDEX	0
#define RAS_VECTOR_OFFSET	0x500	/* external interrupt vector */

static irqreturn_t ras_epow_interrupt(int irq, void *dev_id);
static irqreturn_t ras_error_interrupt(int irq, void *dev_id);


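/*
 * Map and request all interrupts described by the given event-source node.
 *
 * Up to 16 interrupt specifiers are translated to virtual irq numbers
 * (either from the obsolete "open-pic-interrupt" property or from the
 * standard interrupt tree) and each one is then requested with the given
 * handler.  Used by init_ras_IRQ() below, e.g.:
 *
 *	np = of_find_node_by_path("/event-sources/internal-errors");
 *	request_ras_irqs(np, ras_error_interrupt, "RAS_ERROR");
 */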
static void request_ras_irqs(struct device_node *np,
			irq_handler_t handler,
			const char *name)
{
	int i, index, count = 0;
	struct of_irq oirq;
	const u32 *opicprop;
	unsigned int opicplen;
	unsigned int virqs[16];

	/* Check for the obsolete "open-pic-interrupt" property. If present,
	 * then map those interrupts using the default interrupt host and the
	 * default trigger.
	 */
	opicprop = of_get_property(np, "open-pic-interrupt", &opicplen);
	if (opicprop) {
		opicplen /= sizeof(u32);
		for (i = 0; i < opicplen; i++) {
			if (count > 15)
				break;
			virqs[count] = irq_create_mapping(NULL, *(opicprop++));
			if (virqs[count] == NO_IRQ)
				printk(KERN_ERR "Unable to allocate interrupt "
				       "number for %s\n", np->full_name);
			else
				count++;

		}
	}
	/* Else use normal interrupt tree parsing */
	else {
		/* First try to do a proper OF tree parsing */
		for (index = 0; of_irq_map_one(np, index, &oirq) == 0;
		     index++) {
			if (count > 15)
				break;
			virqs[count] = irq_create_of_mapping(oirq.controller,
							    oirq.specifier,
							    oirq.size);
			if (virqs[count] == NO_IRQ)
				printk(KERN_ERR "Unable to allocate interrupt "
				       "number for %s\n", np->full_name);
			else
				count++;
		}
	}

	/* Now request them */
	for (i = 0; i < count; i++) {
		if (request_irq(virqs[i], handler, 0, name, NULL)) {
			printk(KERN_ERR "Unable to request interrupt %d for "
			       "%s\n", virqs[i], np->full_name);
			return;
		}
	}
}

/*
 * Initialize handlers for the set of interrupts caused by hardware errors
 * and power system events.
 */
static int __init init_ras_IRQ(void)
{
	struct device_node *np;

	ras_get_sensor_state_token = rtas_token("get-sensor-state");
	ras_check_exception_token = rtas_token("check-exception");

	/* Internal Errors */
	np = of_find_node_by_path("/event-sources/internal-errors");
	if (np != NULL) {
		request_ras_irqs(np, ras_error_interrupt, "RAS_ERROR");
		of_node_put(np);
	}

	/* EPOW Events */
	np = of_find_node_by_path("/event-sources/epow-events");
	if (np != NULL) {
		request_ras_irqs(np, ras_epow_interrupt, "RAS_EPOW");
		of_node_put(np);
	}

	return 0;
}
__initcall(init_ras_IRQ);

/*
 * Handle power subsystem events (EPOW).
 *
 * Presently we just log that the event has occurred.  This should be fixed
 * to examine the type of power failure and take appropriate action where
 * the time horizon permits something useful to be done.
 */
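/*
 * The handler below first reads the EPOW sensor via get-sensor-state
 * (EPOW_SENSOR_TOKEN/EPOW_SENSOR_INDEX); a sensor state above 3 is treated
 * as time critical, and that criticality is passed on to check-exception
 * when the event data is pulled into ras_log_buf for logging.
 */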
static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
{
	int status = 0xdeadbeef;
	int state = 0;
	int critical;

	status = rtas_call(ras_get_sensor_state_token, 2, 2, &state,
			   EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX);

	if (state > 3)
		critical = 1;  /* Time Critical */
	else
		critical = 0;

	spin_lock(&ras_log_buf_lock);

	status = rtas_call(ras_check_exception_token, 6, 1, NULL,
			   RAS_VECTOR_OFFSET,
			   irq_map[irq].hwirq,
			   RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS,
			   critical, __pa(&ras_log_buf),
			   rtas_get_error_log_max());

	udbg_printf("EPOW <0x%lx 0x%x 0x%x>\n",
		    *((unsigned long *)&ras_log_buf), status, state);
	printk(KERN_WARNING "EPOW <0x%lx 0x%x 0x%x>\n",
	       *((unsigned long *)&ras_log_buf), status, state);

	/* format and print the extended information */
	log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0);

	spin_unlock(&ras_log_buf_lock);
	return IRQ_HANDLED;
}

/*
 * Handle hardware error interrupts.
 *
 * RTAS check-exception is called to collect data on the exception.  If
 * the error is deemed recoverable, we log a warning and return.
 * For nonrecoverable errors, an error is logged and we stop all processing
 * as quickly as possible in order to prevent propagation of the failure.
 */
static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
{
	struct rtas_error_log *rtas_elog;
	int status = 0xdeadbeef;
	int fatal;

	spin_lock(&ras_log_buf_lock);

	status = rtas_call(ras_check_exception_token, 6, 1, NULL,
			   RAS_VECTOR_OFFSET,
			   irq_map[irq].hwirq,
			   RTAS_INTERNAL_ERROR, 1 /* Time Critical */,
			   __pa(&ras_log_buf),
			   rtas_get_error_log_max());

	rtas_elog = (struct rtas_error_log *)ras_log_buf;

	if ((status == 0) && (rtas_elog->severity >= RTAS_SEVERITY_ERROR_SYNC))
		fatal = 1;
	else
		fatal = 0;

	/* format and print the extended information */
	log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, fatal);

	if (fatal) {
		udbg_printf("Fatal HW Error <0x%lx 0x%x>\n",
			    *((unsigned long *)&ras_log_buf), status);
		printk(KERN_EMERG "Error: Fatal hardware error <0x%lx 0x%x>\n",
		       *((unsigned long *)&ras_log_buf), status);

#ifndef DEBUG_RTAS_POWER_OFF
		/* Don't actually power off when debugging so we can test
		 * without actually failing while injecting errors.
		 * Error data will not be logged to syslog.
		 */
		ppc_md.power_off();
#endif
	} else {
		udbg_printf("Recoverable HW Error <0x%lx 0x%x>\n",
			    *((unsigned long *)&ras_log_buf), status);
		printk(KERN_WARNING
		       "Warning: Recoverable hardware error <0x%lx 0x%x>\n",
		       *((unsigned long *)&ras_log_buf), status);
	}

	spin_unlock(&ras_log_buf_lock);
	return IRQ_HANDLED;
}

/* Get the error information for errors coming through the
 * FWNMI vectors.  The pt_regs' r3 will be updated to reflect
 * the actual r3 if possible, and a pointer to the error log entry
 * will be returned if found.
 *
 * The mce_data_buf does not have any locks or protection around it;
 * if a second machine check comes in, or a system reset is done
 * before we have logged the error, then we will get corruption in the
 * error log.  This is preferable to holding off on calling
 * ibm,nmi-interlock, which would result in us checkstopping if a
 * second machine check did come in.
 */
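/*
 * Note that r3 is only trusted if it points either into low memory between
 * 0x7000 and 0x7fff0 (presumably the firmware's FWNMI save area) or inside
 * the RTAS region itself; anything else is reported as a corrupt r3 and no
 * log is returned.
 */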
static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
{
	unsigned long errdata = regs->gpr[3];
	struct rtas_error_log *errhdr = NULL;
	unsigned long *savep;

	if ((errdata >= 0x7000 && errdata < 0x7fff0) ||
	    (errdata >= rtas.base && errdata < rtas.base + rtas.size - 16)) {
		savep = __va(errdata);
		regs->gpr[3] = savep[0];	/* restore original r3 */
		memset(mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
		memcpy(mce_data_buf, (char *)(savep + 1), RTAS_ERROR_LOG_MAX);
		errhdr = (struct rtas_error_log *)mce_data_buf;
	} else {
		printk("FWNMI: corrupt r3\n");
	}
	return errhdr;
}

/* Call this when done with the data returned by fwnmi_get_errinfo().
 * It will release the saved data area for other CPUs in the
 * partition to receive FWNMI errors.
 */
static void fwnmi_release_errinfo(void)
{
	int ret = rtas_call(rtas_token("ibm,nmi-interlock"), 0, 1, NULL);
	if (ret != 0)
		printk("FWNMI: nmi-interlock failed: %d\n", ret);
}

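/*
 * System reset handler for pseries.  When FWNMI is active the firmware
 * hands us error information via r3; we fetch it (and release the save
 * area again) but do not act on it yet.  Returning 0 tells the caller
 * that the normal reset handling still needs to be performed.
 */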
int pSeries_system_reset_exception(struct pt_regs *regs)
{
	if (fwnmi_active) {
		struct rtas_error_log *errhdr = fwnmi_get_errinfo(regs);
		if (errhdr) {
			/* XXX Should look at FWNMI information */
		}
		fwnmi_release_errinfo();
	}
	return 0; /* need to perform reset */
}

/*
 * See if we can recover from a machine check exception.
 * This is only called on power4 (or above) and only via
 * the Firmware Non-Maskable Interrupts (fwnmi) handler
 * which provides the error analysis for us.
 *
 * Return 1 if corrected (or delivered a signal).
 * Return 0 if there is nothing we can do.
 */
static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err)
{
	int nonfatal = 0;

	if (err->disposition == RTAS_DISP_FULLY_RECOVERED) {
		/* Platform corrected itself */
		nonfatal = 1;
	} else if ((regs->msr & MSR_RI) &&
		   user_mode(regs) &&
		   err->severity == RTAS_SEVERITY_ERROR_SYNC &&
		   err->disposition == RTAS_DISP_NOT_RECOVERED &&
		   err->target == RTAS_TARGET_MEMORY &&
		   err->type == RTAS_TYPE_ECC_UNCORR &&
		   !(current->pid == 0 || is_global_init(current))) {
		/* Kill off a user process with an ECC error */
		printk(KERN_ERR "MCE: uncorrectable ecc error for pid %d\n",
		       current->pid);
		/* XXX something better for ECC error? */
		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
		nonfatal = 1;
	}

	log_error((char *)err, ERR_TYPE_RTAS_LOG, !nonfatal);

	return nonfatal;
}

/*
 * Handle a machine check.
 *
 * Note that on Power 4 and beyond Firmware Non-Maskable Interrupts (fwnmi)
 * should be present.  If so, the handler which called us tells us if the
 * error was recovered (never true if RI=0).
 *
 * On hardware prior to Power 4 these exceptions were asynchronous, which
 * means we can't tell exactly where they occurred and so we can't recover.
 */
int pSeries_machine_check_exception(struct pt_regs *regs)
{
	struct rtas_error_log *errp;

	if (fwnmi_active) {
		errp = fwnmi_get_errinfo(regs);
		fwnmi_release_errinfo();
		if (errp && recover_mce(regs, errp))
			return 1;
	}

	return 0;
}
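/*
 * Both entry points above are declared in "pseries.h"; they are presumably
 * hooked up through the ppc_md machine description (its
 * system_reset_exception and machine_check_exception callbacks) by the
 * pseries platform setup code, which is how the generic powerpc trap
 * handlers reach them when FWNMI delivers an event.
 */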
370