xref: /linux/arch/powerpc/kernel/traps.c (revision 5e8d780d745c1619aba81fe7166c5a4b5cad2b84)
1 /*
2  *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
3  *
4  *  This program is free software; you can redistribute it and/or
5  *  modify it under the terms of the GNU General Public License
6  *  as published by the Free Software Foundation; either version
7  *  2 of the License, or (at your option) any later version.
8  *
9  *  Modified by Cort Dougan (cort@cs.nmt.edu)
10  *  and Paul Mackerras (paulus@samba.org)
11  */
12 
13 /*
14  * This file handles the architecture-dependent parts of hardware exceptions
15  */
16 
17 #include <linux/config.h>
18 #include <linux/errno.h>
19 #include <linux/sched.h>
20 #include <linux/kernel.h>
21 #include <linux/mm.h>
22 #include <linux/stddef.h>
23 #include <linux/unistd.h>
24 #include <linux/ptrace.h>
25 #include <linux/slab.h>
26 #include <linux/user.h>
27 #include <linux/a.out.h>
28 #include <linux/interrupt.h>
29 #include <linux/init.h>
30 #include <linux/module.h>
31 #include <linux/prctl.h>
32 #include <linux/delay.h>
33 #include <linux/kprobes.h>
34 #include <linux/kexec.h>
35 #include <linux/backlight.h>
36 
37 #include <asm/kdebug.h>
38 #include <asm/pgtable.h>
39 #include <asm/uaccess.h>
40 #include <asm/system.h>
41 #include <asm/io.h>
42 #include <asm/machdep.h>
43 #include <asm/rtas.h>
44 #include <asm/pmc.h>
45 #ifdef CONFIG_PPC32
46 #include <asm/reg.h>
47 #endif
48 #ifdef CONFIG_PMAC_BACKLIGHT
49 #include <asm/backlight.h>
50 #endif
51 #ifdef CONFIG_PPC64
52 #include <asm/firmware.h>
53 #include <asm/processor.h>
54 #endif
55 #include <asm/kexec.h>
56 
57 #ifdef CONFIG_PPC64	/* XXX */
58 #define _IO_BASE	pci_io_base
59 #ifdef CONFIG_KEXEC
60 cpumask_t cpus_in_sr = CPU_MASK_NONE;
61 #endif
62 #endif
63 
64 #ifdef CONFIG_DEBUGGER
65 int (*__debugger)(struct pt_regs *regs);
66 int (*__debugger_ipi)(struct pt_regs *regs);
67 int (*__debugger_bpt)(struct pt_regs *regs);
68 int (*__debugger_sstep)(struct pt_regs *regs);
69 int (*__debugger_iabr_match)(struct pt_regs *regs);
70 int (*__debugger_dabr_match)(struct pt_regs *regs);
71 int (*__debugger_fault_handler)(struct pt_regs *regs);
72 
73 EXPORT_SYMBOL(__debugger);
74 EXPORT_SYMBOL(__debugger_ipi);
75 EXPORT_SYMBOL(__debugger_bpt);
76 EXPORT_SYMBOL(__debugger_sstep);
77 EXPORT_SYMBOL(__debugger_iabr_match);
78 EXPORT_SYMBOL(__debugger_dabr_match);
79 EXPORT_SYMBOL(__debugger_fault_handler);
80 #endif
81 
82 ATOMIC_NOTIFIER_HEAD(powerpc_die_chain);
83 
84 int register_die_notifier(struct notifier_block *nb)
85 {
86 	return atomic_notifier_chain_register(&powerpc_die_chain, nb);
87 }
88 EXPORT_SYMBOL(register_die_notifier);
89 
90 int unregister_die_notifier(struct notifier_block *nb)
91 {
92 	return atomic_notifier_chain_unregister(&powerpc_die_chain, nb);
93 }
94 EXPORT_SYMBOL(unregister_die_notifier);
95 
96 /*
97  * Trap & Exception support
98  */
99 
100 static DEFINE_SPINLOCK(die_lock);
101 
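/*
 * Print an "Oops" report for a fatal kernel exception: registers, loaded
 * modules and relevant build options, then give the kexec/crash-dump code
 * a chance to run and kill the current task.  Returns non-zero if an
 * attached debugger claimed the exception instead.
 */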
102 int die(const char *str, struct pt_regs *regs, long err)
103 {
104 	static int die_counter;
105 
106 	if (debugger(regs))
107 		return 1;
108 
109 	console_verbose();
110 	spin_lock_irq(&die_lock);
111 	bust_spinlocks(1);
112 #ifdef CONFIG_PMAC_BACKLIGHT
113 	mutex_lock(&pmac_backlight_mutex);
114 	if (machine_is(powermac) && pmac_backlight) {
115 		struct backlight_properties *props;
116 
117 		down(&pmac_backlight->sem);
118 		props = pmac_backlight->props;
119 		props->brightness = props->max_brightness;
120 		props->power = FB_BLANK_UNBLANK;
121 		props->update_status(pmac_backlight);
122 		up(&pmac_backlight->sem);
123 	}
124 	mutex_unlock(&pmac_backlight_mutex);
125 #endif
126 	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
127 #ifdef CONFIG_PREEMPT
128 	printk("PREEMPT ");
129 #endif
130 #ifdef CONFIG_SMP
131 	printk("SMP NR_CPUS=%d ", NR_CPUS);
132 #endif
133 #ifdef CONFIG_DEBUG_PAGEALLOC
134 	printk("DEBUG_PAGEALLOC ");
135 #endif
136 #ifdef CONFIG_NUMA
137 	printk("NUMA ");
138 #endif
139 	printk("%s\n", ppc_md.name ? ppc_md.name : "");
140 
141 	print_modules();
142 	show_regs(regs);
143 	bust_spinlocks(0);
144 	spin_unlock_irq(&die_lock);
145 
146 	if (kexec_should_crash(current) ||
147 		kexec_sr_activated(smp_processor_id()))
148 		crash_kexec(regs);
149 	crash_kexec_secondary(regs);
150 
151 	if (in_interrupt())
152 		panic("Fatal exception in interrupt");
153 
154 	if (panic_on_oops) {
155 #ifdef CONFIG_PPC64
156 		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
157 		ssleep(5);
158 #endif
159 		panic("Fatal exception");
160 	}
161 	do_exit(err);
162 
163 	return 0;
164 }
165 
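/*
 * Deliver the signal for a synchronous exception: fill in siginfo with the
 * trap code and faulting address and force the signal on the current task.
 * Kernel-mode faults are routed through die() first.
 */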
166 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
167 {
168 	siginfo_t info;
169 
170 	if (!user_mode(regs)) {
171 		if (die("Exception in kernel mode", regs, signr))
172 			return;
173 	}
174 
175 	memset(&info, 0, sizeof(info));
176 	info.si_signo = signr;
177 	info.si_code = code;
178 	info.si_addr = (void __user *) addr;
179 	force_sig_info(signr, &info, current);
180 
181 	/*
182 	 * Init gets no signals that it doesn't have a handler for.
183 	 * That's all very well, but if it has caused a synchronous
184 	 * exception and we ignore the resulting signal, it will just
185 	 * generate the same exception over and over again and we get
186 	 * nowhere.  Better to kill it and let the kernel panic.
187 	 */
188 	if (current->pid == 1) {
189 		__sighandler_t handler;
190 
191 		spin_lock_irq(&current->sighand->siglock);
192 		handler = current->sighand->action[signr-1].sa.sa_handler;
193 		spin_unlock_irq(&current->sighand->siglock);
194 		if (handler == SIG_DFL) {
195 			/* init has generated a synchronous exception
196 			   and it doesn't have a handler for the signal */
197 			printk(KERN_CRIT "init has generated signal %d "
198 			       "but has no handler for it\n", signr);
199 			do_exit(signr);
200 		}
201 	}
202 }
203 
204 #ifdef CONFIG_PPC64
205 void system_reset_exception(struct pt_regs *regs)
206 {
207 	/* See if any machine dependent calls */
208 	if (ppc_md.system_reset_exception) {
209 		if (ppc_md.system_reset_exception(regs))
210 			return;
211 	}
212 
213 #ifdef CONFIG_KEXEC
214 	cpu_set(smp_processor_id(), cpus_in_sr);
215 #endif
216 
217 	die("System Reset", regs, SIGABRT);
218 
219 	/* Must die if the interrupt is not recoverable */
220 	if (!(regs->msr & MSR_RI))
221 		panic("Unrecoverable System Reset");
222 
223 	/* What should we do here? We could issue a shutdown or hard reset. */
224 }
225 #endif
226 
227 /*
228  * I/O accesses can cause machine checks on powermacs.
229  * Check if the NIP corresponds to the address of a sync
230  * instruction for which there is an entry in the exception
231  * table.
232  * Note that the 601 only takes a machine check on TEA
233  * (transfer error ack) signal assertion, and does not
234  * set any of the top 16 bits of SRR1.
235  *  -- paulus.
236  */
237 static inline int check_io_access(struct pt_regs *regs)
238 {
239 #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
240 	unsigned long msr = regs->msr;
241 	const struct exception_table_entry *entry;
242 	unsigned int *nip = (unsigned int *)regs->nip;
243 
244 	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
245 	    && (entry = search_exception_tables(regs->nip)) != NULL) {
246 		/*
247 		 * Check that it's a sync instruction, or somewhere
248 		 * in the twi; isync; nop sequence that inb/inw/inl uses.
249 		 * As the address is in the exception table
250 		 * we should be able to read the instr there.
251 		 * For the debug message, we look at the preceding
252 		 * load or store.
253 		 */
254 		if (*nip == 0x60000000)		/* nop */
255 			nip -= 2;
256 		else if (*nip == 0x4c00012c)	/* isync */
257 			--nip;
258 		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
259 			/* sync or twi */
260 			unsigned int rb;
261 
262 			--nip;
263 			rb = (*nip >> 11) & 0x1f;
264 			printk(KERN_DEBUG "%s bad port %lx at %p\n",
265 			       (*nip & 0x100)? "OUT to": "IN from",
266 			       regs->gpr[rb] - _IO_BASE, nip);
267 			regs->msr |= MSR_RI;
268 			regs->nip = entry->fixup;
269 			return 1;
270 		}
271 	}
272 #endif /* CONFIG_PPC_PMAC && CONFIG_PPC32 */
273 	return 0;
274 }
275 
276 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
277 /* On 4xx, the reason for the machine check or program exception
278    is in the ESR. */
279 #define get_reason(regs)	((regs)->dsisr)
280 #ifndef CONFIG_FSL_BOOKE
281 #define get_mc_reason(regs)	((regs)->dsisr)
282 #else
283 #define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
284 #endif
285 #define REASON_FP		ESR_FP
286 #define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
287 #define REASON_PRIVILEGED	ESR_PPR
288 #define REASON_TRAP		ESR_PTR
289 
290 /* single-step stuff */
291 #define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
292 #define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)
293 
294 #else
295 /* On non-4xx, the reason for the machine check or program
296    exception is in the MSR. */
297 #define get_reason(regs)	((regs)->msr)
298 #define get_mc_reason(regs)	((regs)->msr)
299 #define REASON_FP		0x100000
300 #define REASON_ILLEGAL		0x80000
301 #define REASON_PRIVILEGED	0x40000
302 #define REASON_TRAP		0x20000
303 
304 #define single_stepping(regs)	((regs)->msr & MSR_SE)
305 #define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
306 #endif
307 
308 /*
309  * This is a "fall-back" implementation for configurations
310  * that don't provide platform-specific machine check info.
311  */
312 void __attribute__ ((weak))
313 platform_machine_check(struct pt_regs *regs)
314 {
315 }
316 
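/*
 * Common machine check handler: give ppc_md a chance to recover, send
 * SIGBUS for user-mode faults, let check_io_access() fix up faulting
 * kernel I/O accesses, then decode the CPU-specific reason bits (ESR,
 * MCSR or SRR1) and die if the fault is not recoverable.
 */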
317 void machine_check_exception(struct pt_regs *regs)
318 {
319 	int recover = 0;
320 	unsigned long reason = get_mc_reason(regs);
321 
322 	/* See if any machine dependent calls */
323 	if (ppc_md.machine_check_exception)
324 		recover = ppc_md.machine_check_exception(regs);
325 
326 	if (recover)
327 		return;
328 
329 	if (user_mode(regs)) {
330 		regs->msr |= MSR_RI;
331 		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
332 		return;
333 	}
334 
335 #if defined(CONFIG_8xx) && defined(CONFIG_PCI)
336 	/* the qspan pci read routines can cause machine checks -- Cort */
337 	bad_page_fault(regs, regs->dar, SIGBUS);
338 	return;
339 #endif
340 
341 	if (debugger_fault_handler(regs)) {
342 		regs->msr |= MSR_RI;
343 		return;
344 	}
345 
346 	if (check_io_access(regs))
347 		return;
348 
349 #if defined(CONFIG_4xx) && !defined(CONFIG_440A)
350 	if (reason & ESR_IMCP) {
351 		printk("Instruction");
352 		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
353 	} else
354 		printk("Data");
355 	printk(" machine check in kernel mode.\n");
356 #elif defined(CONFIG_440A)
357 	printk("Machine check in kernel mode.\n");
358 	if (reason & ESR_IMCP){
359 		printk("Instruction Synchronous Machine Check exception\n");
360 		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
361 	}
362 	else {
363 		u32 mcsr = mfspr(SPRN_MCSR);
364 		if (mcsr & MCSR_IB)
365 			printk("Instruction Read PLB Error\n");
366 		if (mcsr & MCSR_DRB)
367 			printk("Data Read PLB Error\n");
368 		if (mcsr & MCSR_DWB)
369 			printk("Data Write PLB Error\n");
370 		if (mcsr & MCSR_TLBP)
371 			printk("TLB Parity Error\n");
372 		if (mcsr & MCSR_ICP){
373 			flush_instruction_cache();
374 			printk("I-Cache Parity Error\n");
375 		}
376 		if (mcsr & MCSR_DCSP)
377 			printk("D-Cache Search Parity Error\n");
378 		if (mcsr & MCSR_DCFP)
379 			printk("D-Cache Flush Parity Error\n");
380 		if (mcsr & MCSR_IMPE)
381 			printk("Machine Check exception is imprecise\n");
382 
383 		/* Clear MCSR */
384 		mtspr(SPRN_MCSR, mcsr);
385 	}
386 #elif defined (CONFIG_E500)
387 	printk("Machine check in kernel mode.\n");
388 	printk("Caused by (from MCSR=%lx): ", reason);
389 
390 	if (reason & MCSR_MCP)
391 		printk("Machine Check Signal\n");
392 	if (reason & MCSR_ICPERR)
393 		printk("Instruction Cache Parity Error\n");
394 	if (reason & MCSR_DCP_PERR)
395 		printk("Data Cache Push Parity Error\n");
396 	if (reason & MCSR_DCPERR)
397 		printk("Data Cache Parity Error\n");
398 	if (reason & MCSR_GL_CI)
399 		printk("Guarded Load or Cache-Inhibited stwcx.\n");
400 	if (reason & MCSR_BUS_IAERR)
401 		printk("Bus - Instruction Address Error\n");
402 	if (reason & MCSR_BUS_RAERR)
403 		printk("Bus - Read Address Error\n");
404 	if (reason & MCSR_BUS_WAERR)
405 		printk("Bus - Write Address Error\n");
406 	if (reason & MCSR_BUS_IBERR)
407 		printk("Bus - Instruction Data Error\n");
408 	if (reason & MCSR_BUS_RBERR)
409 		printk("Bus - Read Data Bus Error\n");
410 	if (reason & MCSR_BUS_WBERR)
411 		printk("Bus - Read Data Bus Error\n");
412 	if (reason & MCSR_BUS_IPERR)
413 		printk("Bus - Instruction Parity Error\n");
414 	if (reason & MCSR_BUS_RPERR)
415 		printk("Bus - Read Parity Error\n");
416 #elif defined (CONFIG_E200)
417 	printk("Machine check in kernel mode.\n");
418 	printk("Caused by (from MCSR=%lx): ", reason);
419 
420 	if (reason & MCSR_MCP)
421 		printk("Machine Check Signal\n");
422 	if (reason & MCSR_CP_PERR)
423 		printk("Cache Push Parity Error\n");
424 	if (reason & MCSR_CPERR)
425 		printk("Cache Parity Error\n");
426 	if (reason & MCSR_EXCP_ERR)
427 		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
428 	if (reason & MCSR_BUS_IRERR)
429 		printk("Bus - Read Bus Error on instruction fetch\n");
430 	if (reason & MCSR_BUS_DRERR)
431 		printk("Bus - Read Bus Error on data load\n");
432 	if (reason & MCSR_BUS_WRERR)
433 		printk("Bus - Write Bus Error on buffered store or cache line push\n");
434 #else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
435 	printk("Machine check in kernel mode.\n");
436 	printk("Caused by (from SRR1=%lx): ", reason);
437 	switch (reason & 0x601F0000) {
438 	case 0x80000:
439 		printk("Machine check signal\n");
440 		break;
441 	case 0:		/* for 601 */
442 	case 0x40000:
443 	case 0x140000:	/* 7450 MSS error and TEA */
444 		printk("Transfer error ack signal\n");
445 		break;
446 	case 0x20000:
447 		printk("Data parity error signal\n");
448 		break;
449 	case 0x10000:
450 		printk("Address parity error signal\n");
451 		break;
452 	case 0x20000000:
453 		printk("L1 Data Cache error\n");
454 		break;
455 	case 0x40000000:
456 		printk("L1 Instruction Cache error\n");
457 		break;
458 	case 0x00100000:
459 		printk("L2 data cache parity error\n");
460 		break;
461 	default:
462 		printk("Unknown values in msr\n");
463 	}
464 #endif /* CONFIG_4xx */
465 
466 	/*
467 	 * Optional platform-provided routine to print out
468 	 * additional info, e.g. bus error registers.
469 	 */
470 	platform_machine_check(regs);
471 
472 	if (debugger_fault_handler(regs))
473 		return;
474 	die("Machine check", regs, SIGBUS);
475 
476 	/* Must die if the interrupt is not recoverable */
477 	if (!(regs->msr & MSR_RI))
478 		panic("Unrecoverable Machine check");
479 }
480 
481 void SMIException(struct pt_regs *regs)
482 {
483 	die("System Management Interrupt", regs, SIGABRT);
484 }
485 
486 void unknown_exception(struct pt_regs *regs)
487 {
488 	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
489 	       regs->nip, regs->msr, regs->trap);
490 
491 	_exception(SIGTRAP, regs, 0, 0);
492 }
493 
494 void instruction_breakpoint_exception(struct pt_regs *regs)
495 {
496 	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
497 					5, SIGTRAP) == NOTIFY_STOP)
498 		return;
499 	if (debugger_iabr_match(regs))
500 		return;
501 	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
502 }
503 
504 void RunModeException(struct pt_regs *regs)
505 {
506 	_exception(SIGTRAP, regs, 0, 0);
507 }
508 
509 void __kprobes single_step_exception(struct pt_regs *regs)
510 {
511 	regs->msr &= ~(MSR_SE | MSR_BE);  /* Turn off 'trace' bits */
512 
513 	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
514 					5, SIGTRAP) == NOTIFY_STOP)
515 		return;
516 	if (debugger_sstep(regs))
517 		return;
518 
519 	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
520 }
521 
522 /*
523  * After we have successfully emulated an instruction, we have to
524  * check if the instruction was being single-stepped, and if so,
525  * pretend we got a single-step exception.  This was pointed out
526  * by Kumar Gala.  -- paulus
527  */
528 static void emulate_single_step(struct pt_regs *regs)
529 {
530 	if (single_stepping(regs)) {
531 		clear_single_step(regs);
532 		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
533 	}
534 }
535 
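/*
 * Turn an IEEE floating-point exception into a SIGFPE with the matching
 * FPE_* code, based on which FPSCR enable/status bit pair is set.
 */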
536 static void parse_fpe(struct pt_regs *regs)
537 {
538 	int code = 0;
539 	unsigned long fpscr;
540 
541 	flush_fp_to_thread(current);
542 
543 	fpscr = current->thread.fpscr.val;
544 
545 	/* Invalid operation */
546 	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
547 		code = FPE_FLTINV;
548 
549 	/* Overflow */
550 	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
551 		code = FPE_FLTOVF;
552 
553 	/* Underflow */
554 	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
555 		code = FPE_FLTUND;
556 
557 	/* Divide by zero */
558 	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
559 		code = FPE_FLTDIV;
560 
561 	/* Inexact result */
562 	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
563 		code = FPE_FLTRES;
564 
565 	_exception(SIGFPE, regs, code, regs->nip);
566 }
567 
568 /*
569  * Illegal instruction emulation support.  Originally written to
570  * provide the PVR to user applications using the mfspr rd, PVR.
571  * Return non-zero if we can't emulate, or -EFAULT if the associated
572  * memory access caused an access fault.  Return zero on success.
573  *
574  * There are a couple of ways to do this, either "decode" the instruction
575  * or directly match lots of bits.  In this case, matching lots of
576  * bits is faster and easier.
577  *
578  */
579 #define INST_MFSPR_PVR		0x7c1f42a6
580 #define INST_MFSPR_PVR_MASK	0xfc1fffff
581 
582 #define INST_DCBA		0x7c0005ec
583 #define INST_DCBA_MASK		0x7c0007fe
584 
585 #define INST_MCRXR		0x7c000400
586 #define INST_MCRXR_MASK		0x7c0007fe
587 
588 #define INST_STRING		0x7c00042a
589 #define INST_STRING_MASK	0x7c0007fe
590 #define INST_STRING_GEN_MASK	0x7c00067e
591 #define INST_LSWI		0x7c0004aa
592 #define INST_LSWX		0x7c00042a
593 #define INST_STSWI		0x7c0005aa
594 #define INST_STSWX		0x7c00052a
595 
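/*
 * Emulate the load/store string instructions (lswi/lswx/stswi/stswx) one
 * byte at a time with get_user()/put_user(), wrapping from r31 back to r0
 * as the PowerPC architecture requires.
 */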
596 static int emulate_string_inst(struct pt_regs *regs, u32 instword)
597 {
598 	u8 rT = (instword >> 21) & 0x1f;
599 	u8 rA = (instword >> 16) & 0x1f;
600 	u8 NB_RB = (instword >> 11) & 0x1f;
601 	u32 num_bytes;
602 	unsigned long EA;
603 	int pos = 0;
604 
605 	/* Early out if we are an invalid form of lswx */
606 	if ((instword & INST_STRING_MASK) == INST_LSWX)
607 		if ((rT == rA) || (rT == NB_RB))
608 			return -EINVAL;
609 
610 	EA = (rA == 0) ? 0 : regs->gpr[rA];
611 
612 	switch (instword & INST_STRING_MASK) {
613 		case INST_LSWX:
614 		case INST_STSWX:
615 			EA += NB_RB;
616 			num_bytes = regs->xer & 0x7f;
617 			break;
618 		case INST_LSWI:
619 		case INST_STSWI:
620 			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
621 			break;
622 		default:
623 			return -EINVAL;
624 	}
625 
626 	while (num_bytes != 0)
627 	{
628 		u8 val;
629 		u32 shift = 8 * (3 - (pos & 0x3));
630 
631 		switch ((instword & INST_STRING_MASK)) {
632 			case INST_LSWX:
633 			case INST_LSWI:
634 				if (get_user(val, (u8 __user *)EA))
635 					return -EFAULT;
636 				/* first time updating this reg,
637 				 * zero it out */
638 				if (pos == 0)
639 					regs->gpr[rT] = 0;
640 				regs->gpr[rT] |= val << shift;
641 				break;
642 			case INST_STSWI:
643 			case INST_STSWX:
644 				val = regs->gpr[rT] >> shift;
645 				if (put_user(val, (u8 __user *)EA))
646 					return -EFAULT;
647 				break;
648 		}
649 		/* move EA to next address */
650 		EA += 1;
651 		num_bytes--;
652 
653 		/* manage our position within the register */
654 		if (++pos == 4) {
655 			pos = 0;
656 			if (++rT == 32)
657 				rT = 0;
658 		}
659 	}
660 
661 	return 0;
662 }
663 
664 static int emulate_instruction(struct pt_regs *regs)
665 {
666 	u32 instword;
667 	u32 rd;
668 
669 	if (!user_mode(regs) || (regs->msr & MSR_LE))
670 		return -EINVAL;
671 	CHECK_FULL_REGS(regs);
672 
673 	if (get_user(instword, (u32 __user *)(regs->nip)))
674 		return -EFAULT;
675 
676 	/* Emulate the mfspr rD, PVR. */
677 	if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
678 		rd = (instword >> 21) & 0x1f;
679 		regs->gpr[rd] = mfspr(SPRN_PVR);
680 		return 0;
681 	}
682 
683 	/* Emulating the dcba insn is just a no-op.  */
684 	if ((instword & INST_DCBA_MASK) == INST_DCBA)
685 		return 0;
686 
687 	/* Emulate the mcrxr insn.  */
688 	if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
689 		int shift = (instword >> 21) & 0x1c;
690 		unsigned long msk = 0xf0000000UL >> shift;
691 
692 		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
693 		regs->xer &= ~0xf0000000UL;
694 		return 0;
695 	}
696 
697 	/* Emulate load/store string insn. */
698 	if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
699 		return emulate_string_inst(regs, instword);
700 
701 	return -EINVAL;
702 }
703 
704 /*
705  * Look through the list of trap instructions that are used for BUG(),
706  * BUG_ON() and WARN_ON() and see if we hit one.  At this point we know
707  * that the exception was caused by a trap instruction of some kind.
708  * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
709  * otherwise.
710  */
711 extern struct bug_entry __start___bug_table[], __stop___bug_table[];
712 
713 #ifndef CONFIG_MODULES
714 #define module_find_bug(x)	NULL
715 #endif
716 
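/* Look up a trap address in the kernel bug table, then in any modules. */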
717 struct bug_entry *find_bug(unsigned long bugaddr)
718 {
719 	struct bug_entry *bug;
720 
721 	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
722 		if (bugaddr == bug->bug_addr)
723 			return bug;
724 	return module_find_bug(bugaddr);
725 }
726 
727 static int check_bug_trap(struct pt_regs *regs)
728 {
729 	struct bug_entry *bug;
730 	unsigned long addr;
731 
732 	if (regs->msr & MSR_PR)
733 		return 0;	/* not in kernel */
734 	addr = regs->nip;	/* address of trap instruction */
735 	if (addr < PAGE_OFFSET)
736 		return 0;
737 	bug = find_bug(regs->nip);
738 	if (bug == NULL)
739 		return 0;
740 	if (bug->line & BUG_WARNING_TRAP) {
741 		/* this is a WARN_ON rather than BUG/BUG_ON */
742 		printk(KERN_ERR "Badness in %s at %s:%ld\n",
743 		       bug->function, bug->file,
744 		       bug->line & ~BUG_WARNING_TRAP);
745 		dump_stack();
746 		return 1;
747 	}
748 	printk(KERN_CRIT "kernel BUG in %s at %s:%ld!\n",
749 	       bug->function, bug->file, bug->line);
750 
751 	return 0;
752 }
753 
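/*
 * Program check: sort out IEEE FP exceptions, trap instructions
 * (breakpoints and BUG/WARN traps), and illegal or privileged
 * instructions, emulating the latter where possible.
 */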
754 void __kprobes program_check_exception(struct pt_regs *regs)
755 {
756 	unsigned int reason = get_reason(regs);
757 	extern int do_mathemu(struct pt_regs *regs);
758 
759 #ifdef CONFIG_MATH_EMULATION
760 	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
761 	 * but there seems to be a hardware bug on the 405GP (RevD)
762 	 * that means ESR is sometimes set incorrectly - either to
763 	 * ESR_DST (!?) or 0.  In the process of chasing this with the
764 	 * hardware people - not sure if it can happen on any illegal
765 	 * instruction or only on FP instructions, whether there is a
766 	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
767 	if (!(reason & REASON_TRAP) && do_mathemu(regs) == 0) {
768 		emulate_single_step(regs);
769 		return;
770 	}
771 #endif /* CONFIG_MATH_EMULATION */
772 
773 	if (reason & REASON_FP) {
774 		/* IEEE FP exception */
775 		parse_fpe(regs);
776 		return;
777 	}
778 	if (reason & REASON_TRAP) {
779 		/* trap exception */
780 		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
781 				== NOTIFY_STOP)
782 			return;
783 		if (debugger_bpt(regs))
784 			return;
785 		if (check_bug_trap(regs)) {
786 			regs->nip += 4;
787 			return;
788 		}
789 		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
790 		return;
791 	}
792 
793 	local_irq_enable();
794 
795 	/* Try to emulate it if we should. */
796 	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
797 		switch (emulate_instruction(regs)) {
798 		case 0:
799 			regs->nip += 4;
800 			emulate_single_step(regs);
801 			return;
802 		case -EFAULT:
803 			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
804 			return;
805 		}
806 	}
807 
808 	if (reason & REASON_PRIVILEGED)
809 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
810 	else
811 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
812 }
813 
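/*
 * Alignment interrupt: try to emulate the unaligned access (unless the
 * task asked for SIGBUS via PR_UNALIGN_SIGBUS), skip the instruction on
 * success, otherwise raise SIGSEGV or SIGBUS.
 */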
814 void alignment_exception(struct pt_regs *regs)
815 {
816 	int fixed = 0;
817 
818 	/* we don't implement logging of alignment exceptions */
819 	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
820 		fixed = fix_alignment(regs);
821 
822 	if (fixed == 1) {
823 		regs->nip += 4;	/* skip over emulated instruction */
824 		emulate_single_step(regs);
825 		return;
826 	}
827 
828 	/* Operand address was bad */
829 	if (fixed == -EFAULT) {
830 		if (user_mode(regs))
831 			_exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
832 		else
833 			/* Search exception table */
834 			bad_page_fault(regs, regs->dar, SIGSEGV);
835 		return;
836 	}
837 	_exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
838 }
839 
840 void StackOverflow(struct pt_regs *regs)
841 {
842 	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
843 	       current, regs->gpr[1]);
844 	debugger(regs);
845 	show_regs(regs);
846 	panic("kernel stack overflow");
847 }
848 
849 void nonrecoverable_exception(struct pt_regs *regs)
850 {
851 	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
852 	       regs->nip, regs->msr);
853 	debugger(regs);
854 	die("nonrecoverable exception", regs, SIGKILL);
855 }
856 
857 void trace_syscall(struct pt_regs *regs)
858 {
859 	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
860 	       current, current->pid, regs->nip, regs->link, regs->gpr[0],
861 	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
862 }
863 
864 void kernel_fp_unavailable_exception(struct pt_regs *regs)
865 {
866 	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
867 			  "%lx at %lx\n", regs->trap, regs->nip);
868 	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
869 }
870 
871 void altivec_unavailable_exception(struct pt_regs *regs)
872 {
873 #if !defined(CONFIG_ALTIVEC)
874 	if (user_mode(regs)) {
875 		/* A user program has executed an altivec instruction,
876 		   but this kernel doesn't support altivec. */
877 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
878 		return;
879 	}
880 #endif
881 	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
882 			"%lx at %lx\n", regs->trap, regs->nip);
883 	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
884 }
885 
886 void performance_monitor_exception(struct pt_regs *regs)
887 {
888 	perf_irq(regs);
889 }
890 
891 #ifdef CONFIG_8xx
892 void SoftwareEmulation(struct pt_regs *regs)
893 {
894 	extern int do_mathemu(struct pt_regs *);
895 	extern int Soft_emulate_8xx(struct pt_regs *);
896 	int errcode;
897 
898 	CHECK_FULL_REGS(regs);
899 
900 	if (!user_mode(regs)) {
901 		debugger(regs);
902 		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
903 	}
904 
905 #ifdef CONFIG_MATH_EMULATION
906 	errcode = do_mathemu(regs);
907 #else
908 	errcode = Soft_emulate_8xx(regs);
909 #endif
910 	if (errcode) {
911 		if (errcode > 0)
912 			_exception(SIGFPE, regs, 0, 0);
913 		else if (errcode == -EFAULT)
914 			_exception(SIGSEGV, regs, 0, 0);
915 		else
916 			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
917 	} else
918 		emulate_single_step(regs);
919 }
920 #endif /* CONFIG_8xx */
921 
922 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
923 
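/*
 * BookE/40x debug interrupt: on an instruction-completion (single-step)
 * event, clear the hardware single-step state and report SIGTRAP.
 */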
924 void DebugException(struct pt_regs *regs, unsigned long debug_status)
925 {
926 	if (debug_status & DBSR_IC) {	/* instruction completion */
927 		regs->msr &= ~MSR_DE;
928 		if (user_mode(regs)) {
929 			current->thread.dbcr0 &= ~DBCR0_IC;
930 		} else {
931 			/* Disable instruction completion */
932 			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
933 			/* Clear the instruction completion event */
934 			mtspr(SPRN_DBSR, DBSR_IC);
935 			if (debugger_sstep(regs))
936 				return;
937 		}
938 		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
939 	}
940 }
941 #endif /* CONFIG_40x || CONFIG_BOOKE */
942 
943 #if !defined(CONFIG_TAU_INT)
944 void TAUException(struct pt_regs *regs)
945 {
946 	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
947 	       regs->nip, regs->msr, regs->trap, print_tainted());
948 }
949 #endif /* CONFIG_TAU_INT */
950 
951 #ifdef CONFIG_ALTIVEC
952 void altivec_assist_exception(struct pt_regs *regs)
953 {
954 	int err;
955 
956 	if (!user_mode(regs)) {
957 		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
958 		       " at %lx\n", regs->nip);
959 		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
960 	}
961 
962 	flush_altivec_to_thread(current);
963 
964 	err = emulate_altivec(regs);
965 	if (err == 0) {
966 		regs->nip += 4;		/* skip emulated instruction */
967 		emulate_single_step(regs);
968 		return;
969 	}
970 
971 	if (err == -EFAULT) {
972 		/* got an error reading the instruction */
973 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
974 	} else {
975 		/* didn't recognize the instruction */
976 		/* XXX quick hack for now: set the non-Java bit in the VSCR */
977 		if (printk_ratelimit())
978 			printk(KERN_ERR "Unrecognized altivec instruction "
979 			       "in %s at %lx\n", current->comm, regs->nip);
980 		current->thread.vscr.u[3] |= 0x10000;
981 	}
982 }
983 #endif /* CONFIG_ALTIVEC */
984 
985 #ifdef CONFIG_FSL_BOOKE
986 void CacheLockingException(struct pt_regs *regs, unsigned long address,
987 			   unsigned long error_code)
988 {
989 	/* We treat cache locking instructions from userspace
990 	 * as privileged ops; in the future we could try to do
991 	 * something smarter.
992 	 */
993 	if (error_code & (ESR_DLK|ESR_ILK))
994 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
995 	return;
996 }
997 #endif /* CONFIG_FSL_BOOKE */
998 
999 #ifdef CONFIG_SPE
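/*
 * SPE floating-point exception: map the SPEFSCR status bits the task has
 * enabled via prctl(PR_SET_FPEXC) to an FPE_* code, set the sticky status
 * bits the hardware may have missed, and send SIGFPE.
 */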
1000 void SPEFloatingPointException(struct pt_regs *regs)
1001 {
1002 	unsigned long spefscr;
1003 	int fpexc_mode;
1004 	int code = 0;
1005 
1006 	spefscr = current->thread.spefscr;
1007 	fpexc_mode = current->thread.fpexc_mode;
1008 
1009 	/* Hardware does not necessarily set sticky
1010 	 * underflow/overflow/invalid flags */
1011 	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
1012 		code = FPE_FLTOVF;
1013 		spefscr |= SPEFSCR_FOVFS;
1014 	}
1015 	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
1016 		code = FPE_FLTUND;
1017 		spefscr |= SPEFSCR_FUNFS;
1018 	}
1019 	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
1020 		code = FPE_FLTDIV;
1021 	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
1022 		code = FPE_FLTINV;
1023 		spefscr |= SPEFSCR_FINVS;
1024 	}
1025 	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
1026 		code = FPE_FLTRES;
1027 
1028 	current->thread.spefscr = spefscr;
1029 
1030 	_exception(SIGFPE, regs, code, regs->nip);
1031 	return;
1032 }
1033 #endif
1034 
1035 /*
1036  * We enter here if we get an unrecoverable exception, that is, one
1037  * that happened at a point where the RI (recoverable interrupt) bit
1038  * in the MSR is 0.  This indicates that SRR0/1 are live, and that
1039  * we therefore lost state by taking this exception.
1040  */
1041 void unrecoverable_exception(struct pt_regs *regs)
1042 {
1043 	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
1044 	       regs->trap, regs->nip);
1045 	die("Unrecoverable exception", regs, SIGABRT);
1046 }
1047 
1048 #ifdef CONFIG_BOOKE_WDT
1049 /*
1050  * Default handler for a Watchdog exception: it simply disables further
1051  * watchdog interrupts (TCR[WIE]); boards are expected to override it.
1052  */
1053 void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
1054 {
1055 	/* Generic WatchdogHandler, implement your own */
1056 	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
1057 	return;
1058 }
1059 
1060 void WatchdogException(struct pt_regs *regs)
1061 {
1062 	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
1063 	WatchdogHandler(regs);
1064 }
1065 #endif
1066 
1067 /*
1068  * We enter here if we discover during exception entry that we are
1069  * running in supervisor mode with a userspace value in the stack pointer.
1070  */
1071 void kernel_bad_stack(struct pt_regs *regs)
1072 {
1073 	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
1074 	       regs->gpr[1], regs->nip);
1075 	die("Bad kernel stack pointer", regs, SIGABRT);
1076 }
1077 
1078 void __init trap_init(void)
1079 {
1080 }
1081