xref: /linux/arch/powerpc/kernel/traps.c (revision 606d099cdd1080bbb50ea50dc52d98252f8f10a1)
1 /*
2  *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
3  *
4  *  This program is free software; you can redistribute it and/or
5  *  modify it under the terms of the GNU General Public License
6  *  as published by the Free Software Foundation; either version
7  *  2 of the License, or (at your option) any later version.
8  *
9  *  Modified by Cort Dougan (cort@cs.nmt.edu)
10  *  and Paul Mackerras (paulus@samba.org)
11  */
12 
13 /*
14  * This file handles the architecture-dependent parts of hardware exceptions
15  */
16 
17 #include <linux/errno.h>
18 #include <linux/sched.h>
19 #include <linux/kernel.h>
20 #include <linux/mm.h>
21 #include <linux/stddef.h>
22 #include <linux/unistd.h>
23 #include <linux/ptrace.h>
24 #include <linux/slab.h>
25 #include <linux/user.h>
26 #include <linux/a.out.h>
27 #include <linux/interrupt.h>
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/prctl.h>
31 #include <linux/delay.h>
32 #include <linux/kprobes.h>
33 #include <linux/kexec.h>
34 #include <linux/backlight.h>
35 
36 #include <asm/kdebug.h>
37 #include <asm/pgtable.h>
38 #include <asm/uaccess.h>
39 #include <asm/system.h>
40 #include <asm/io.h>
41 #include <asm/machdep.h>
42 #include <asm/rtas.h>
43 #include <asm/pmc.h>
44 #ifdef CONFIG_PPC32
45 #include <asm/reg.h>
46 #endif
47 #ifdef CONFIG_PMAC_BACKLIGHT
48 #include <asm/backlight.h>
49 #endif
50 #ifdef CONFIG_PPC64
51 #include <asm/firmware.h>
52 #include <asm/processor.h>
53 #endif
54 #include <asm/kexec.h>
55 
56 #ifdef CONFIG_DEBUGGER
57 int (*__debugger)(struct pt_regs *regs);
58 int (*__debugger_ipi)(struct pt_regs *regs);
59 int (*__debugger_bpt)(struct pt_regs *regs);
60 int (*__debugger_sstep)(struct pt_regs *regs);
61 int (*__debugger_iabr_match)(struct pt_regs *regs);
62 int (*__debugger_dabr_match)(struct pt_regs *regs);
63 int (*__debugger_fault_handler)(struct pt_regs *regs);
64 
65 EXPORT_SYMBOL(__debugger);
66 EXPORT_SYMBOL(__debugger_ipi);
67 EXPORT_SYMBOL(__debugger_bpt);
68 EXPORT_SYMBOL(__debugger_sstep);
69 EXPORT_SYMBOL(__debugger_iabr_match);
70 EXPORT_SYMBOL(__debugger_dabr_match);
71 EXPORT_SYMBOL(__debugger_fault_handler);
72 #endif
73 
74 ATOMIC_NOTIFIER_HEAD(powerpc_die_chain);
75 
76 int register_die_notifier(struct notifier_block *nb)
77 {
78 	return atomic_notifier_chain_register(&powerpc_die_chain, nb);
79 }
80 EXPORT_SYMBOL(register_die_notifier);
81 
82 int unregister_die_notifier(struct notifier_block *nb)
83 {
84 	return atomic_notifier_chain_unregister(&powerpc_die_chain, nb);
85 }
86 EXPORT_SYMBOL(unregister_die_notifier);
87 
88 /*
89  * Trap & Exception support
90  */
91 
92 static DEFINE_SPINLOCK(die_lock);
93 
94 int die(const char *str, struct pt_regs *regs, long err)
95 {
96 	static int die_counter;
97 
98 	if (debugger(regs))
99 		return 1;
100 
101 	console_verbose();
102 	spin_lock_irq(&die_lock);
103 	bust_spinlocks(1);
104 #ifdef CONFIG_PMAC_BACKLIGHT
105 	mutex_lock(&pmac_backlight_mutex);
106 	if (machine_is(powermac) && pmac_backlight) {
107 		struct backlight_properties *props;
108 
109 		down(&pmac_backlight->sem);
110 		props = pmac_backlight->props;
111 		props->brightness = props->max_brightness;
112 		props->power = FB_BLANK_UNBLANK;
113 		props->update_status(pmac_backlight);
114 		up(&pmac_backlight->sem);
115 	}
116 	mutex_unlock(&pmac_backlight_mutex);
117 #endif
118 	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
119 #ifdef CONFIG_PREEMPT
120 	printk("PREEMPT ");
121 #endif
122 #ifdef CONFIG_SMP
123 	printk("SMP NR_CPUS=%d ", NR_CPUS);
124 #endif
125 #ifdef CONFIG_DEBUG_PAGEALLOC
126 	printk("DEBUG_PAGEALLOC ");
127 #endif
128 #ifdef CONFIG_NUMA
129 	printk("NUMA ");
130 #endif
131 	printk("%s\n", ppc_md.name ? ppc_md.name : "");
132 
133 	print_modules();
134 	show_regs(regs);
135 	bust_spinlocks(0);
136 	spin_unlock_irq(&die_lock);
137 
138 	if (kexec_should_crash(current) ||
139 		kexec_sr_activated(smp_processor_id()))
140 		crash_kexec(regs);
141 	crash_kexec_secondary(regs);
142 
143 	if (in_interrupt())
144 		panic("Fatal exception in interrupt");
145 
146 	if (panic_on_oops)
147 		panic("Fatal exception");
148 
149 	do_exit(err);
150 
151 	return 0;
152 }
153 
154 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
155 {
156 	siginfo_t info;
157 
158 	if (!user_mode(regs)) {
159 		if (die("Exception in kernel mode", regs, signr))
160 			return;
161 	}
162 
163 	memset(&info, 0, sizeof(info));
164 	info.si_signo = signr;
165 	info.si_code = code;
166 	info.si_addr = (void __user *) addr;
167 	force_sig_info(signr, &info, current);
168 
169 	/*
170 	 * Init gets no signals that it doesn't have a handler for.
171 	 * That's all very well, but if it has caused a synchronous
172 	 * exception and we ignore the resulting signal, it will just
173 	 * generate the same exception over and over again and we get
174 	 * nowhere.  Better to kill it and let the kernel panic.
175 	 */
176 	if (current->pid == 1) {
177 		__sighandler_t handler;
178 
179 		spin_lock_irq(&current->sighand->siglock);
180 		handler = current->sighand->action[signr-1].sa.sa_handler;
181 		spin_unlock_irq(&current->sighand->siglock);
182 		if (handler == SIG_DFL) {
183 			/* init has generated a synchronous exception
184 			   and it doesn't have a handler for the signal */
185 			printk(KERN_CRIT "init has generated signal %d "
186 			       "but has no handler for it\n", signr);
187 			do_exit(signr);
188 		}
189 	}
190 }
191 
192 #ifdef CONFIG_PPC64
193 void system_reset_exception(struct pt_regs *regs)
194 {
195 	/* See if any machine dependent calls */
196 	if (ppc_md.system_reset_exception) {
197 		if (ppc_md.system_reset_exception(regs))
198 			return;
199 	}
200 
201 #ifdef CONFIG_KEXEC
202 	cpu_set(smp_processor_id(), cpus_in_sr);
203 #endif
204 
205 	die("System Reset", regs, SIGABRT);
206 
207 	/*
208 	 * Some CPUs when released from the debugger will execute this path.
209 	 * These CPUs entered the debugger via a soft-reset. If the CPU was
210 	 * hung before entering the debugger it will return to the hung
211 	 * state when exiting this function.  This causes a problem in
212 	 * kdump since the hung CPU(s) will not respond to the IPI sent
213 	 * from kdump. To prevent the problem we call crash_kexec_secondary()
214 	 * here. If a kdump had not been initiated or we exit the debugger
215 	 * with the "exit and recover" command (x) crash_kexec_secondary()
216 	 * will return after 5ms and the CPU returns to its previous state.
217 	 */
218 	crash_kexec_secondary(regs);
219 
220 	/* Must die if the interrupt is not recoverable */
221 	if (!(regs->msr & MSR_RI))
222 		panic("Unrecoverable System Reset");
223 
224 	/* What should we do here? We could issue a shutdown or hard reset. */
225 }
226 #endif
227 
228 /*
229  * I/O accesses can cause machine checks on powermacs.
230  * Check if the NIP corresponds to the address of a sync
231  * instruction for which there is an entry in the exception
232  * table.
233  * Note that the 601 only takes a machine check on TEA
234  * (transfer error ack) signal assertion, and does not
235  * set any of the top 16 bits of SRR1.
236  *  -- paulus.
237  */
238 static inline int check_io_access(struct pt_regs *regs)
239 {
240 #ifdef CONFIG_PPC32
241 	unsigned long msr = regs->msr;
242 	const struct exception_table_entry *entry;
243 	unsigned int *nip = (unsigned int *)regs->nip;
244 
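	/*
	 * A 601 leaves the top 16 bits of SRR1 clear on a machine check;
	 * other CPUs report the cause via the machine-check-signal (0x80000)
	 * or TEA (0x40000) bits tested here.
	 */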
245 	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
246 	    && (entry = search_exception_tables(regs->nip)) != NULL) {
247 		/*
248 		 * Check that it's a sync instruction, or somewhere
249 		 * in the twi; isync; nop sequence that inb/inw/inl uses.
250 		 * As the address is in the exception table
251 		 * we should be able to read the instr there.
252 		 * For the debug message, we look at the preceding
253 		 * load or store.
254 		 */
255 		if (*nip == 0x60000000)		/* nop */
256 			nip -= 2;
257 		else if (*nip == 0x4c00012c)	/* isync */
258 			--nip;
259 		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
260 			/* sync or twi */
261 			unsigned int rb;
262 
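			/*
			 * Step back to the load/store that did the access and
			 * pull out its RB field so the port address can be
			 * reported below.
			 */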
263 			--nip;
264 			rb = (*nip >> 11) & 0x1f;
265 			printk(KERN_DEBUG "%s bad port %lx at %p\n",
266 			       (*nip & 0x100)? "OUT to": "IN from",
267 			       regs->gpr[rb] - _IO_BASE, nip);
268 			regs->msr |= MSR_RI;
269 			regs->nip = entry->fixup;
270 			return 1;
271 		}
272 	}
273 #endif /* CONFIG_PPC32 */
274 	return 0;
275 }
276 
277 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
278 /* On 4xx, the reason for the machine check or program exception
279    is in the ESR. */
280 #define get_reason(regs)	((regs)->dsisr)
281 #ifndef CONFIG_FSL_BOOKE
282 #define get_mc_reason(regs)	((regs)->dsisr)
283 #else
284 #define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
285 #endif
286 #define REASON_FP		ESR_FP
287 #define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
288 #define REASON_PRIVILEGED	ESR_PPR
289 #define REASON_TRAP		ESR_PTR
290 
291 /* single-step stuff */
292 #define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
293 #define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)
294 
295 #else
296 /* On non-4xx, the reason for the machine check or program
297    exception is in the MSR. */
298 #define get_reason(regs)	((regs)->msr)
299 #define get_mc_reason(regs)	((regs)->msr)
300 #define REASON_FP		0x100000
301 #define REASON_ILLEGAL		0x80000
302 #define REASON_PRIVILEGED	0x40000
303 #define REASON_TRAP		0x20000
304 
305 #define single_stepping(regs)	((regs)->msr & MSR_SE)
306 #define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
307 #endif
308 
309 /*
310  * This is a "fall-back" implementation for configurations
311  * which don't provide platform-specific machine check info
312  */
313 void __attribute__ ((weak))
314 platform_machine_check(struct pt_regs *regs)
315 {
316 }
317 
318 void machine_check_exception(struct pt_regs *regs)
319 {
320 	int recover = 0;
321 	unsigned long reason = get_mc_reason(regs);
322 
323 	/* See if any machine dependent calls */
324 	if (ppc_md.machine_check_exception)
325 		recover = ppc_md.machine_check_exception(regs);
326 
327 	if (recover)
328 		return;
329 
330 	if (user_mode(regs)) {
331 		regs->msr |= MSR_RI;
332 		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
333 		return;
334 	}
335 
336 #if defined(CONFIG_8xx) && defined(CONFIG_PCI)
337 	/* the qspan pci read routines can cause machine checks -- Cort */
338 	bad_page_fault(regs, regs->dar, SIGBUS);
339 	return;
340 #endif
341 
342 	if (debugger_fault_handler(regs)) {
343 		regs->msr |= MSR_RI;
344 		return;
345 	}
346 
347 	if (check_io_access(regs))
348 		return;
349 
350 #if defined(CONFIG_4xx) && !defined(CONFIG_440A)
351 	if (reason & ESR_IMCP) {
352 		printk("Instruction");
353 		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
354 	} else
355 		printk("Data");
356 	printk(" machine check in kernel mode.\n");
357 #elif defined(CONFIG_440A)
358 	printk("Machine check in kernel mode.\n");
359 	if (reason & ESR_IMCP){
360 		printk("Instruction Synchronous Machine Check exception\n");
361 		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
362 	}
363 	else {
364 		u32 mcsr = mfspr(SPRN_MCSR);
365 		if (mcsr & MCSR_IB)
366 			printk("Instruction Read PLB Error\n");
367 		if (mcsr & MCSR_DRB)
368 			printk("Data Read PLB Error\n");
369 		if (mcsr & MCSR_DWB)
370 			printk("Data Write PLB Error\n");
371 		if (mcsr & MCSR_TLBP)
372 			printk("TLB Parity Error\n");
373 		if (mcsr & MCSR_ICP){
374 			flush_instruction_cache();
375 			printk("I-Cache Parity Error\n");
376 		}
377 		if (mcsr & MCSR_DCSP)
378 			printk("D-Cache Search Parity Error\n");
379 		if (mcsr & MCSR_DCFP)
380 			printk("D-Cache Flush Parity Error\n");
381 		if (mcsr & MCSR_IMPE)
382 			printk("Machine Check exception is imprecise\n");
383 
384 		/* Clear MCSR */
385 		mtspr(SPRN_MCSR, mcsr);
386 	}
387 #elif defined (CONFIG_E500)
388 	printk("Machine check in kernel mode.\n");
389 	printk("Caused by (from MCSR=%lx): ", reason);
390 
391 	if (reason & MCSR_MCP)
392 		printk("Machine Check Signal\n");
393 	if (reason & MCSR_ICPERR)
394 		printk("Instruction Cache Parity Error\n");
395 	if (reason & MCSR_DCP_PERR)
396 		printk("Data Cache Push Parity Error\n");
397 	if (reason & MCSR_DCPERR)
398 		printk("Data Cache Parity Error\n");
399 	if (reason & MCSR_GL_CI)
400 		printk("Guarded Load or Cache-Inhibited stwcx.\n");
401 	if (reason & MCSR_BUS_IAERR)
402 		printk("Bus - Instruction Address Error\n");
403 	if (reason & MCSR_BUS_RAERR)
404 		printk("Bus - Read Address Error\n");
405 	if (reason & MCSR_BUS_WAERR)
406 		printk("Bus - Write Address Error\n");
407 	if (reason & MCSR_BUS_IBERR)
408 		printk("Bus - Instruction Data Error\n");
409 	if (reason & MCSR_BUS_RBERR)
410 		printk("Bus - Read Data Bus Error\n");
411 	if (reason & MCSR_BUS_WBERR)
412 		printk("Bus - Write Data Bus Error\n");
413 	if (reason & MCSR_BUS_IPERR)
414 		printk("Bus - Instruction Parity Error\n");
415 	if (reason & MCSR_BUS_RPERR)
416 		printk("Bus - Read Parity Error\n");
417 #elif defined (CONFIG_E200)
418 	printk("Machine check in kernel mode.\n");
419 	printk("Caused by (from MCSR=%lx): ", reason);
420 
421 	if (reason & MCSR_MCP)
422 		printk("Machine Check Signal\n");
423 	if (reason & MCSR_CP_PERR)
424 		printk("Cache Push Parity Error\n");
425 	if (reason & MCSR_CPERR)
426 		printk("Cache Parity Error\n");
427 	if (reason & MCSR_EXCP_ERR)
428 		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
429 	if (reason & MCSR_BUS_IRERR)
430 		printk("Bus - Read Bus Error on instruction fetch\n");
431 	if (reason & MCSR_BUS_DRERR)
432 		printk("Bus - Read Bus Error on data load\n");
433 	if (reason & MCSR_BUS_WRERR)
434 		printk("Bus - Write Bus Error on buffered store or cache line push\n");
435 #else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
436 	printk("Machine check in kernel mode.\n");
437 	printk("Caused by (from SRR1=%lx): ", reason);
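	/*
	 * 0x601F0000 covers the SRR1 bits that these (non-4xx, non-Book-E)
	 * processors use to report the cause of a machine check.
	 */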
438 	switch (reason & 0x601F0000) {
439 	case 0x80000:
440 		printk("Machine check signal\n");
441 		break;
442 	case 0:		/* for 601 */
443 	case 0x40000:
444 	case 0x140000:	/* 7450 MSS error and TEA */
445 		printk("Transfer error ack signal\n");
446 		break;
447 	case 0x20000:
448 		printk("Data parity error signal\n");
449 		break;
450 	case 0x10000:
451 		printk("Address parity error signal\n");
452 		break;
453 	case 0x20000000:
454 		printk("L1 Data Cache error\n");
455 		break;
456 	case 0x40000000:
457 		printk("L1 Instruction Cache error\n");
458 		break;
459 	case 0x00100000:
460 		printk("L2 data cache parity error\n");
461 		break;
462 	default:
463 		printk("Unknown values in msr\n");
464 	}
465 #endif /* CONFIG_4xx */
466 
467 	/*
468 	 * Optional platform-provided routine to print out
469 	 * additional info, e.g. bus error registers.
470 	 */
471 	platform_machine_check(regs);
472 
473 	if (debugger_fault_handler(regs))
474 		return;
475 	die("Machine check", regs, SIGBUS);
476 
477 	/* Must die if the interrupt is not recoverable */
478 	if (!(regs->msr & MSR_RI))
479 		panic("Unrecoverable Machine check");
480 }
481 
482 void SMIException(struct pt_regs *regs)
483 {
484 	die("System Management Interrupt", regs, SIGABRT);
485 }
486 
487 void unknown_exception(struct pt_regs *regs)
488 {
489 	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
490 	       regs->nip, regs->msr, regs->trap);
491 
492 	_exception(SIGTRAP, regs, 0, 0);
493 }
494 
495 void instruction_breakpoint_exception(struct pt_regs *regs)
496 {
497 	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
498 					5, SIGTRAP) == NOTIFY_STOP)
499 		return;
500 	if (debugger_iabr_match(regs))
501 		return;
502 	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
503 }
504 
505 void RunModeException(struct pt_regs *regs)
506 {
507 	_exception(SIGTRAP, regs, 0, 0);
508 }
509 
510 void __kprobes single_step_exception(struct pt_regs *regs)
511 {
512 	regs->msr &= ~(MSR_SE | MSR_BE);  /* Turn off 'trace' bits */
513 
514 	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
515 					5, SIGTRAP) == NOTIFY_STOP)
516 		return;
517 	if (debugger_sstep(regs))
518 		return;
519 
520 	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
521 }
522 
523 /*
524  * After we have successfully emulated an instruction, we have to
525  * check if the instruction was being single-stepped, and if so,
526  * pretend we got a single-step exception.  This was pointed out
527  * by Kumar Gala.  -- paulus
528  */
529 static void emulate_single_step(struct pt_regs *regs)
530 {
531 	if (single_stepping(regs)) {
532 		clear_single_step(regs);
533 		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
534 	}
535 }
536 
537 static void parse_fpe(struct pt_regs *regs)
538 {
539 	int code = 0;
540 	unsigned long fpscr;
541 
542 	flush_fp_to_thread(current);
543 
544 	fpscr = current->thread.fpscr.val;
545 
546 	/* Invalid operation */
547 	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
548 		code = FPE_FLTINV;
549 
550 	/* Overflow */
551 	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
552 		code = FPE_FLTOVF;
553 
554 	/* Underflow */
555 	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
556 		code = FPE_FLTUND;
557 
558 	/* Divide by zero */
559 	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
560 		code = FPE_FLTDIV;
561 
562 	/* Inexact result */
563 	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
564 		code = FPE_FLTRES;
565 
566 	_exception(SIGFPE, regs, code, regs->nip);
567 }
568 
569 /*
570  * Illegal instruction emulation support.  Originally written to
571  * provide the PVR to user applications using the mfspr rd, PVR.
572  * Return non-zero if we can't emulate, or -EFAULT if the associated
573  * memory access caused an access fault.  Return zero on success.
574  *
575  * There are a couple of ways to do this, either "decode" the instruction
576  * or directly match lots of bits.  In this case, matching lots of
577  * bits is faster and easier.
578  *
579  */
580 #define INST_MFSPR_PVR		0x7c1f42a6
581 #define INST_MFSPR_PVR_MASK	0xfc1fffff
582 
583 #define INST_DCBA		0x7c0005ec
584 #define INST_DCBA_MASK		0xfc0007fe
585 
586 #define INST_MCRXR		0x7c000400
587 #define INST_MCRXR_MASK		0xfc0007fe
588 
589 #define INST_STRING		0x7c00042a
590 #define INST_STRING_MASK	0xfc0007fe
591 #define INST_STRING_GEN_MASK	0xfc00067e
592 #define INST_LSWI		0x7c0004aa
593 #define INST_LSWX		0x7c00042a
594 #define INST_STSWI		0x7c0005aa
595 #define INST_STSWX		0x7c00052a
596 
597 #define INST_POPCNTB		0x7c0000f4
598 #define INST_POPCNTB_MASK	0xfc0007fe
599 
600 static int emulate_string_inst(struct pt_regs *regs, u32 instword)
601 {
602 	u8 rT = (instword >> 21) & 0x1f;
603 	u8 rA = (instword >> 16) & 0x1f;
604 	u8 NB_RB = (instword >> 11) & 0x1f;
605 	u32 num_bytes;
606 	unsigned long EA;
607 	int pos = 0;
608 
609 	/* Early out if we are an invalid form of lswx */
610 	if ((instword & INST_STRING_MASK) == INST_LSWX)
611 		if ((rT == rA) || (rT == NB_RB))
612 			return -EINVAL;
613 
614 	EA = (rA == 0) ? 0 : regs->gpr[rA];
615 
616 	switch (instword & INST_STRING_MASK) {
617 		case INST_LSWX:
618 		case INST_STSWX:
619 			EA += NB_RB;
620 			num_bytes = regs->xer & 0x7f;
621 			break;
622 		case INST_LSWI:
623 		case INST_STSWI:
624 			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
625 			break;
626 		default:
627 			return -EINVAL;
628 	}
629 
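	/*
	 * Transfer one byte per iteration, packing four bytes big-endian
	 * into the low word of each register and wrapping from r31 back
	 * to r0, as the string instructions require.
	 */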
630 	while (num_bytes != 0)
631 	{
632 		u8 val;
633 		u32 shift = 8 * (3 - (pos & 0x3));
634 
635 		switch ((instword & INST_STRING_MASK)) {
636 			case INST_LSWX:
637 			case INST_LSWI:
638 				if (get_user(val, (u8 __user *)EA))
639 					return -EFAULT;
640 				/* first time updating this reg,
641 				 * zero it out */
642 				if (pos == 0)
643 					regs->gpr[rT] = 0;
644 				regs->gpr[rT] |= val << shift;
645 				break;
646 			case INST_STSWI:
647 			case INST_STSWX:
648 				val = regs->gpr[rT] >> shift;
649 				if (put_user(val, (u8 __user *)EA))
650 					return -EFAULT;
651 				break;
652 		}
653 		/* move EA to next address */
654 		EA += 1;
655 		num_bytes--;
656 
657 		/* manage our position within the register */
658 		if (++pos == 4) {
659 			pos = 0;
660 			if (++rT == 32)
661 				rT = 0;
662 		}
663 	}
664 
665 	return 0;
666 }
667 
668 static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
669 {
670 	u32 ra,rs;
671 	unsigned long tmp;
672 
673 	ra = (instword >> 16) & 0x1f;
674 	rs = (instword >> 21) & 0x1f;
675 
676 	tmp = regs->gpr[rs];
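	/*
	 * Classic parallel bit count: fold adjacent bit pairs, then nibbles,
	 * then mask so each byte ends up holding its own population count,
	 * which is what popcntb is defined to produce.
	 */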
677 	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
678 	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
679 	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
680 	regs->gpr[ra] = tmp;
681 
682 	return 0;
683 }
684 
685 static int emulate_instruction(struct pt_regs *regs)
686 {
687 	u32 instword;
688 	u32 rd;
689 
690 	if (!user_mode(regs) || (regs->msr & MSR_LE))
691 		return -EINVAL;
692 	CHECK_FULL_REGS(regs);
693 
694 	if (get_user(instword, (u32 __user *)(regs->nip)))
695 		return -EFAULT;
696 
697 	/* Emulate the mfspr rD, PVR. */
698 	if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
699 		rd = (instword >> 21) & 0x1f;
700 		regs->gpr[rd] = mfspr(SPRN_PVR);
701 		return 0;
702 	}
703 
704 	/* Emulating the dcba insn is just a no-op.  */
705 	if ((instword & INST_DCBA_MASK) == INST_DCBA)
706 		return 0;
707 
708 	/* Emulate the mcrxr insn.  */
709 	if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
710 		int shift = (instword >> 21) & 0x1c;
711 		unsigned long msk = 0xf0000000UL >> shift;
712 
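		/*
		 * Copy XER[SO,OV,CA] (the top four XER bits) into the CR
		 * field named by the instruction, then clear them in XER;
		 * shift is four times the destination field number.
		 */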
713 		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
714 		regs->xer &= ~0xf0000000UL;
715 		return 0;
716 	}
717 
718 	/* Emulate load/store string insn. */
719 	if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
720 		return emulate_string_inst(regs, instword);
721 
722 	/* Emulate the popcntb (Population Count Bytes) instruction. */
723 	if ((instword & INST_POPCNTB_MASK) == INST_POPCNTB) {
724 		return emulate_popcntb_inst(regs, instword);
725 	}
726 
727 	return -EINVAL;
728 }
729 
730 /*
731  * Look through the list of trap instructions that are used for BUG(),
732  * BUG_ON() and WARN_ON() and see if we hit one.  At this point we know
733  * that the exception was caused by a trap instruction of some kind.
734  * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
735  * otherwise.
736  */
737 extern struct bug_entry __start___bug_table[], __stop___bug_table[];
738 
739 #ifndef CONFIG_MODULES
740 #define module_find_bug(x)	NULL
741 #endif
742 
743 struct bug_entry *find_bug(unsigned long bugaddr)
744 {
745 	struct bug_entry *bug;
746 
747 	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
748 		if (bugaddr == bug->bug_addr)
749 			return bug;
750 	return module_find_bug(bugaddr);
751 }
752 
753 static int check_bug_trap(struct pt_regs *regs)
754 {
755 	struct bug_entry *bug;
756 	unsigned long addr;
757 
758 	if (regs->msr & MSR_PR)
759 		return 0;	/* not in kernel */
760 	addr = regs->nip;	/* address of trap instruction */
761 	if (addr < PAGE_OFFSET)
762 		return 0;
763 	bug = find_bug(regs->nip);
764 	if (bug == NULL)
765 		return 0;
766 	if (bug->line & BUG_WARNING_TRAP) {
767 		/* this is a WARN_ON rather than BUG/BUG_ON */
768 		printk(KERN_ERR "Badness in %s at %s:%ld\n",
769 		       bug->function, bug->file,
770 		       bug->line & ~BUG_WARNING_TRAP);
771 		dump_stack();
772 		return 1;
773 	}
774 	printk(KERN_CRIT "kernel BUG in %s at %s:%ld!\n",
775 	       bug->function, bug->file, bug->line);
776 
777 	return 0;
778 }
779 
780 void __kprobes program_check_exception(struct pt_regs *regs)
781 {
782 	unsigned int reason = get_reason(regs);
783 	extern int do_mathemu(struct pt_regs *regs);
784 
785 #ifdef CONFIG_MATH_EMULATION
786 	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
787 	 * but there seems to be a hardware bug on the 405GP (RevD)
788 	 * that means ESR is sometimes set incorrectly - either to
789 	 * ESR_DST (!?) or 0.  In the process of chasing this with the
790 	 * hardware people - not sure if it can happen on any illegal
791 	 * instruction or only on FP instructions, whether there is a
792 	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
793 	if (!(reason & REASON_TRAP) && do_mathemu(regs) == 0) {
794 		emulate_single_step(regs);
795 		return;
796 	}
797 #endif /* CONFIG_MATH_EMULATION */
798 
799 	if (reason & REASON_FP) {
800 		/* IEEE FP exception */
801 		parse_fpe(regs);
802 		return;
803 	}
804 	if (reason & REASON_TRAP) {
805 		/* trap exception */
806 		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
807 				== NOTIFY_STOP)
808 			return;
809 		if (debugger_bpt(regs))
810 			return;
811 		if (check_bug_trap(regs)) {
812 			regs->nip += 4;
813 			return;
814 		}
815 		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
816 		return;
817 	}
818 
819 	local_irq_enable();
820 
821 	/* Try to emulate it if we should. */
822 	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
823 		switch (emulate_instruction(regs)) {
824 		case 0:
825 			regs->nip += 4;
826 			emulate_single_step(regs);
827 			return;
828 		case -EFAULT:
829 			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
830 			return;
831 		}
832 	}
833 
834 	if (reason & REASON_PRIVILEGED)
835 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
836 	else
837 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
838 }
839 
840 void alignment_exception(struct pt_regs *regs)
841 {
842 	int sig, code, fixed = 0;
843 
844 	/* we don't implement logging of alignment exceptions */
845 	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
846 		fixed = fix_alignment(regs);
847 
848 	if (fixed == 1) {
849 		regs->nip += 4;	/* skip over emulated instruction */
850 		emulate_single_step(regs);
851 		return;
852 	}
853 
854 	/* Operand address was bad */
855 	if (fixed == -EFAULT) {
856 		sig = SIGSEGV;
857 		code = SEGV_ACCERR;
858 	} else {
859 		sig = SIGBUS;
860 		code = BUS_ADRALN;
861 	}
862 	if (user_mode(regs))
863 		_exception(sig, regs, code, regs->dar);
864 	else
865 		bad_page_fault(regs, regs->dar, sig);
866 }
867 
868 void StackOverflow(struct pt_regs *regs)
869 {
870 	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
871 	       current, regs->gpr[1]);
872 	debugger(regs);
873 	show_regs(regs);
874 	panic("kernel stack overflow");
875 }
876 
877 void nonrecoverable_exception(struct pt_regs *regs)
878 {
879 	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
880 	       regs->nip, regs->msr);
881 	debugger(regs);
882 	die("nonrecoverable exception", regs, SIGKILL);
883 }
884 
885 void trace_syscall(struct pt_regs *regs)
886 {
887 	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
888 	       current, current->pid, regs->nip, regs->link, regs->gpr[0],
889 	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
890 }
891 
892 void kernel_fp_unavailable_exception(struct pt_regs *regs)
893 {
894 	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
895 			  "%lx at %lx\n", regs->trap, regs->nip);
896 	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
897 }
898 
899 void altivec_unavailable_exception(struct pt_regs *regs)
900 {
901 	if (user_mode(regs)) {
902 		/* A user program has executed an altivec instruction,
903 		   but this kernel doesn't support altivec. */
904 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
905 		return;
906 	}
907 
908 	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
909 			"%lx at %lx\n", regs->trap, regs->nip);
910 	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
911 }
912 
913 void performance_monitor_exception(struct pt_regs *regs)
914 {
915 	perf_irq(regs);
916 }
917 
918 #ifdef CONFIG_8xx
919 void SoftwareEmulation(struct pt_regs *regs)
920 {
921 	extern int do_mathemu(struct pt_regs *);
922 	extern int Soft_emulate_8xx(struct pt_regs *);
923 	int errcode;
924 
925 	CHECK_FULL_REGS(regs);
926 
927 	if (!user_mode(regs)) {
928 		debugger(regs);
929 		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
930 	}
931 
932 #ifdef CONFIG_MATH_EMULATION
933 	errcode = do_mathemu(regs);
934 #else
935 	errcode = Soft_emulate_8xx(regs);
936 #endif
937 	if (errcode) {
938 		if (errcode > 0)
939 			_exception(SIGFPE, regs, 0, 0);
940 		else if (errcode == -EFAULT)
941 			_exception(SIGSEGV, regs, 0, 0);
942 		else
943 			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
944 	} else
945 		emulate_single_step(regs);
946 }
947 #endif /* CONFIG_8xx */
948 
949 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
950 
951 void DebugException(struct pt_regs *regs, unsigned long debug_status)
952 {
953 	if (debug_status & DBSR_IC) {	/* instruction completion */
954 		regs->msr &= ~MSR_DE;
955 		if (user_mode(regs)) {
956 			current->thread.dbcr0 &= ~DBCR0_IC;
957 		} else {
958 			/* Disable instruction completion */
959 			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
960 			/* Clear the instruction completion event */
961 			mtspr(SPRN_DBSR, DBSR_IC);
962 			if (debugger_sstep(regs))
963 				return;
964 		}
965 		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
966 	}
967 }
968 #endif /* CONFIG_40x || CONFIG_BOOKE */
969 
970 #if !defined(CONFIG_TAU_INT)
971 void TAUException(struct pt_regs *regs)
972 {
973 	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
974 	       regs->nip, regs->msr, regs->trap, print_tainted());
975 }
976 #endif /* CONFIG_TAU_INT */
977 
978 #ifdef CONFIG_ALTIVEC
979 void altivec_assist_exception(struct pt_regs *regs)
980 {
981 	int err;
982 
983 	if (!user_mode(regs)) {
984 		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
985 		       " at %lx\n", regs->nip);
986 		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
987 	}
988 
989 	flush_altivec_to_thread(current);
990 
991 	err = emulate_altivec(regs);
992 	if (err == 0) {
993 		regs->nip += 4;		/* skip emulated instruction */
994 		emulate_single_step(regs);
995 		return;
996 	}
997 
998 	if (err == -EFAULT) {
999 		/* got an error reading the instruction */
1000 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1001 	} else {
1002 		/* didn't recognize the instruction */
1003 		/* XXX quick hack for now: set the non-Java bit in the VSCR */
1004 		if (printk_ratelimit())
1005 			printk(KERN_ERR "Unrecognized altivec instruction "
1006 			       "in %s at %lx\n", current->comm, regs->nip);
1007 		current->thread.vscr.u[3] |= 0x10000;
1008 	}
1009 }
1010 #endif /* CONFIG_ALTIVEC */
1011 
1012 #ifdef CONFIG_FSL_BOOKE
1013 void CacheLockingException(struct pt_regs *regs, unsigned long address,
1014 			   unsigned long error_code)
1015 {
1016 	/* We treat cache locking instructions from the user
1017 	 * as priv ops; in the future we could try to do
1018 	 * something smarter
1019 	 */
1020 	if (error_code & (ESR_DLK|ESR_ILK))
1021 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1022 	return;
1023 }
1024 #endif /* CONFIG_FSL_BOOKE */
1025 
1026 #ifdef CONFIG_SPE
1027 void SPEFloatingPointException(struct pt_regs *regs)
1028 {
1029 	unsigned long spefscr;
1030 	int fpexc_mode;
1031 	int code = 0;
1032 
1033 	spefscr = current->thread.spefscr;
1034 	fpexc_mode = current->thread.fpexc_mode;
1035 
1036 	/* Hardware does not necessarily set sticky
1037 	 * underflow/overflow/invalid flags */
1038 	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
1039 		code = FPE_FLTOVF;
1040 		spefscr |= SPEFSCR_FOVFS;
1041 	}
1042 	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
1043 		code = FPE_FLTUND;
1044 		spefscr |= SPEFSCR_FUNFS;
1045 	}
1046 	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
1047 		code = FPE_FLTDIV;
1048 	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
1049 		code = FPE_FLTINV;
1050 		spefscr |= SPEFSCR_FINVS;
1051 	}
1052 	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
1053 		code = FPE_FLTRES;
1054 
1055 	current->thread.spefscr = spefscr;
1056 
1057 	_exception(SIGFPE, regs, code, regs->nip);
1058 	return;
1059 }
1060 #endif
1061 
1062 /*
1063  * We enter here if we get an unrecoverable exception, that is, one
1064  * that happened at a point where the RI (recoverable interrupt) bit
1065  * in the MSR is 0.  This indicates that SRR0/1 are live, and that
1066  * we therefore lost state by taking this exception.
1067  */
1068 void unrecoverable_exception(struct pt_regs *regs)
1069 {
1070 	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
1071 	       regs->trap, regs->nip);
1072 	die("Unrecoverable exception", regs, SIGABRT);
1073 }
1074 
1075 #ifdef CONFIG_BOOKE_WDT
1076 /*
1077  * Default handler for a Watchdog exception,
1078  * spins until a reboot occurs
1079  */
1080 void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
1081 {
1082 	/* Generic WatchdogHandler, implement your own */
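	/* Clear TCR[WIE] so the watchdog raises no further interrupts. */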
1083 	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
1084 	return;
1085 }
1086 
1087 void WatchdogException(struct pt_regs *regs)
1088 {
1089 	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
1090 	WatchdogHandler(regs);
1091 }
1092 #endif
1093 
1094 /*
1095  * We enter here if we discover during exception entry that we are
1096  * running in supervisor mode with a userspace value in the stack pointer.
1097  */
1098 void kernel_bad_stack(struct pt_regs *regs)
1099 {
1100 	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
1101 	       regs->gpr[1], regs->nip);
1102 	die("Bad kernel stack pointer", regs, SIGABRT);
1103 }
1104 
1105 void __init trap_init(void)
1106 {
1107 }
1108