xref: /linux/arch/powerpc/kernel/traps.c (revision b0148a98ec5151fec82064d95f11eb9efbc628ea)
1 /*
2  *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
3  *
4  *  This program is free software; you can redistribute it and/or
5  *  modify it under the terms of the GNU General Public License
6  *  as published by the Free Software Foundation; either version
7  *  2 of the License, or (at your option) any later version.
8  *
9  *  Modified by Cort Dougan (cort@cs.nmt.edu)
10  *  and Paul Mackerras (paulus@samba.org)
11  */
12 
13 /*
14  * This file handles the architecture-dependent parts of hardware exceptions
15  */
16 
17 #include <linux/errno.h>
18 #include <linux/sched.h>
19 #include <linux/kernel.h>
20 #include <linux/mm.h>
21 #include <linux/stddef.h>
22 #include <linux/unistd.h>
23 #include <linux/ptrace.h>
24 #include <linux/slab.h>
25 #include <linux/user.h>
26 #include <linux/a.out.h>
27 #include <linux/interrupt.h>
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/prctl.h>
31 #include <linux/delay.h>
32 #include <linux/kprobes.h>
33 #include <linux/kexec.h>
34 #include <linux/backlight.h>
35 #include <linux/bug.h>
36 
37 #include <asm/kdebug.h>
38 #include <asm/pgtable.h>
39 #include <asm/uaccess.h>
40 #include <asm/system.h>
41 #include <asm/io.h>
42 #include <asm/machdep.h>
43 #include <asm/rtas.h>
44 #include <asm/pmc.h>
45 #ifdef CONFIG_PPC32
46 #include <asm/reg.h>
47 #endif
48 #ifdef CONFIG_PMAC_BACKLIGHT
49 #include <asm/backlight.h>
50 #endif
51 #ifdef CONFIG_PPC64
52 #include <asm/firmware.h>
53 #include <asm/processor.h>
54 #endif
55 #include <asm/kexec.h>
56 
57 #ifdef CONFIG_DEBUGGER
58 int (*__debugger)(struct pt_regs *regs);
59 int (*__debugger_ipi)(struct pt_regs *regs);
60 int (*__debugger_bpt)(struct pt_regs *regs);
61 int (*__debugger_sstep)(struct pt_regs *regs);
62 int (*__debugger_iabr_match)(struct pt_regs *regs);
63 int (*__debugger_dabr_match)(struct pt_regs *regs);
64 int (*__debugger_fault_handler)(struct pt_regs *regs);
65 
66 EXPORT_SYMBOL(__debugger);
67 EXPORT_SYMBOL(__debugger_ipi);
68 EXPORT_SYMBOL(__debugger_bpt);
69 EXPORT_SYMBOL(__debugger_sstep);
70 EXPORT_SYMBOL(__debugger_iabr_match);
71 EXPORT_SYMBOL(__debugger_dabr_match);
72 EXPORT_SYMBOL(__debugger_fault_handler);
73 #endif
74 
75 ATOMIC_NOTIFIER_HEAD(powerpc_die_chain);
76 
77 int register_die_notifier(struct notifier_block *nb)
78 {
79 	return atomic_notifier_chain_register(&powerpc_die_chain, nb);
80 }
81 EXPORT_SYMBOL(register_die_notifier);
82 
83 int unregister_die_notifier(struct notifier_block *nb)
84 {
85 	return atomic_notifier_chain_unregister(&powerpc_die_chain, nb);
86 }
87 EXPORT_SYMBOL(unregister_die_notifier);
88 
89 /*
90  * Trap & Exception support
91  */
92 
93 static DEFINE_SPINLOCK(die_lock);
94 
95 int die(const char *str, struct pt_regs *regs, long err)
96 {
97 	static int die_counter;
98 
99 	if (debugger(regs))
100 		return 1;
101 
102 	console_verbose();
103 	spin_lock_irq(&die_lock);
104 	bust_spinlocks(1);
105 #ifdef CONFIG_PMAC_BACKLIGHT
106 	mutex_lock(&pmac_backlight_mutex);
107 	if (machine_is(powermac) && pmac_backlight) {
108 		struct backlight_properties *props;
109 
110 		down(&pmac_backlight->sem);
111 		props = pmac_backlight->props;
112 		props->brightness = props->max_brightness;
113 		props->power = FB_BLANK_UNBLANK;
114 		props->update_status(pmac_backlight);
115 		up(&pmac_backlight->sem);
116 	}
117 	mutex_unlock(&pmac_backlight_mutex);
118 #endif
119 	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
120 #ifdef CONFIG_PREEMPT
121 	printk("PREEMPT ");
122 #endif
123 #ifdef CONFIG_SMP
124 	printk("SMP NR_CPUS=%d ", NR_CPUS);
125 #endif
126 #ifdef CONFIG_DEBUG_PAGEALLOC
127 	printk("DEBUG_PAGEALLOC ");
128 #endif
129 #ifdef CONFIG_NUMA
130 	printk("NUMA ");
131 #endif
132 	printk("%s\n", ppc_md.name ? ppc_md.name : "");
133 
134 	print_modules();
135 	show_regs(regs);
136 	bust_spinlocks(0);
137 	spin_unlock_irq(&die_lock);
138 
139 	if (kexec_should_crash(current) ||
140 		kexec_sr_activated(smp_processor_id()))
141 		crash_kexec(regs);
142 	crash_kexec_secondary(regs);
143 
144 	if (in_interrupt())
145 		panic("Fatal exception in interrupt");
146 
147 	if (panic_on_oops)
148 		panic("Fatal exception");
149 
150 	do_exit(err);
151 
152 	return 0;
153 }
154 
155 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
156 {
157 	siginfo_t info;
158 
159 	if (!user_mode(regs)) {
160 		if (die("Exception in kernel mode", regs, signr))
161 			return;
162 	}
163 
164 	memset(&info, 0, sizeof(info));
165 	info.si_signo = signr;
166 	info.si_code = code;
167 	info.si_addr = (void __user *) addr;
168 	force_sig_info(signr, &info, current);
169 
170 	/*
171 	 * Init gets no signals that it doesn't have a handler for.
172 	 * That's all very well, but if it has caused a synchronous
173 	 * exception and we ignore the resulting signal, it will just
174 	 * generate the same exception over and over again and we get
175 	 * nowhere.  Better to kill it and let the kernel panic.
176 	 */
177 	if (is_init(current)) {
178 		__sighandler_t handler;
179 
180 		spin_lock_irq(&current->sighand->siglock);
181 		handler = current->sighand->action[signr-1].sa.sa_handler;
182 		spin_unlock_irq(&current->sighand->siglock);
183 		if (handler == SIG_DFL) {
184 			/* init has generated a synchronous exception
185 			   and it doesn't have a handler for the signal */
186 			printk(KERN_CRIT "init has generated signal %d "
187 			       "but has no handler for it\n", signr);
188 			do_exit(signr);
189 		}
190 	}
191 }
192 
193 #ifdef CONFIG_PPC64
194 void system_reset_exception(struct pt_regs *regs)
195 {
196 	/* See if any machine-dependent code wants to handle this */
197 	if (ppc_md.system_reset_exception) {
198 		if (ppc_md.system_reset_exception(regs))
199 			return;
200 	}
201 
202 #ifdef CONFIG_KEXEC
203 	cpu_set(smp_processor_id(), cpus_in_sr);
204 #endif
205 
206 	die("System Reset", regs, SIGABRT);
207 
208 	/*
209 	 * Some CPUs when released from the debugger will execute this path.
210 	 * These CPUs entered the debugger via a soft-reset. If the CPU was
211 	 * hung before entering the debugger it will return to the hung
212 	 * state when exiting this function.  This causes a problem in
213 	 * kdump since the hung CPU(s) will not respond to the IPI sent
214 	 * from kdump. To prevent the problem we call crash_kexec_secondary()
215 	 * here. If a kdump had not been initiated or we exit the debugger
216 	 * here. If a kdump has not been initiated, or we exit the debugger
217 	 * with the "exit and recover" command (x), crash_kexec_secondary()
218 	 */
219 	crash_kexec_secondary(regs);
220 
221 	/* Must die if the interrupt is not recoverable */
222 	if (!(regs->msr & MSR_RI))
223 		panic("Unrecoverable System Reset");
224 
225 	/* What should we do here? We could issue a shutdown or hard reset. */
226 }
227 #endif
228 
229 /*
230  * I/O accesses can cause machine checks on powermacs.
231  * Check if the NIP corresponds to the address of a sync
232  * instruction for which there is an entry in the exception
233  * table.
234  * Note that the 601 only takes a machine check on TEA
235  * (transfer error ack) signal assertion, and does not
236  * set any of the top 16 bits of SRR1.
237  *  -- paulus.
238  */
239 static inline int check_io_access(struct pt_regs *regs)
240 {
241 #ifdef CONFIG_PPC32
242 	unsigned long msr = regs->msr;
243 	const struct exception_table_entry *entry;
244 	unsigned int *nip = (unsigned int *)regs->nip;
245 
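	/*
	 * SRR1 bits 0x80000 and 0x40000 correspond to the machine check
	 * signal and TEA cases decoded in machine_check_exception() below;
	 * a clear upper halfword covers the 601, which sets none of the
	 * top 16 bits (see the comment above).
	 */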
246 	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
247 	    && (entry = search_exception_tables(regs->nip)) != NULL) {
248 		/*
249 		 * Check that it's a sync instruction, or somewhere
250 		 * in the twi; isync; nop sequence that inb/inw/inl uses.
251 		 * As the address is in the exception table
252 		 * we should be able to read the instr there.
253 		 * For the debug message, we look at the preceding
254 		 * load or store.
255 		 */
256 		if (*nip == 0x60000000)		/* nop */
257 			nip -= 2;
258 		else if (*nip == 0x4c00012c)	/* isync */
259 			--nip;
260 		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
261 			/* sync or twi */
262 			unsigned int rb;
263 
264 			--nip;
265 			rb = (*nip >> 11) & 0x1f;
266 			printk(KERN_DEBUG "%s bad port %lx at %p\n",
267 			       (*nip & 0x100)? "OUT to": "IN from",
268 			       regs->gpr[rb] - _IO_BASE, nip);
269 			regs->msr |= MSR_RI;
270 			regs->nip = entry->fixup;
271 			return 1;
272 		}
273 	}
274 #endif /* CONFIG_PPC32 */
275 	return 0;
276 }
277 
278 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
279 /* On 4xx and Book-E, the reason for the machine check or program
280    exception is in the ESR. */
281 #define get_reason(regs)	((regs)->dsisr)
282 #ifndef CONFIG_FSL_BOOKE
283 #define get_mc_reason(regs)	((regs)->dsisr)
284 #else
285 #define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
286 #endif
287 #define REASON_FP		ESR_FP
288 #define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
289 #define REASON_PRIVILEGED	ESR_PPR
290 #define REASON_TRAP		ESR_PTR
291 
292 /* single-step stuff */
293 #define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
294 #define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)
295 
296 #else
297 /* On non-4xx/Book-E CPUs, the reason for the machine check or program
298    exception is in the MSR. */
299 #define get_reason(regs)	((regs)->msr)
300 #define get_mc_reason(regs)	((regs)->msr)
301 #define REASON_FP		0x100000
302 #define REASON_ILLEGAL		0x80000
303 #define REASON_PRIVILEGED	0x40000
304 #define REASON_TRAP		0x20000
305 
306 #define single_stepping(regs)	((regs)->msr & MSR_SE)
307 #define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
308 #endif
309 
310 /*
311  * This is a "fall-back" implementation for configurations
312  * which don't provide platform-specific machine check info
313  */
314 void __attribute__ ((weak))
315 platform_machine_check(struct pt_regs *regs)
316 {
317 }
318 
319 void machine_check_exception(struct pt_regs *regs)
320 {
321 	int recover = 0;
322 	unsigned long reason = get_mc_reason(regs);
323 
324 	/* See if any machine-dependent code wants to handle this */
325 	if (ppc_md.machine_check_exception)
326 		recover = ppc_md.machine_check_exception(regs);
327 
328 	if (recover)
329 		return;
330 
331 	if (user_mode(regs)) {
332 		regs->msr |= MSR_RI;
333 		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
334 		return;
335 	}
336 
337 #if defined(CONFIG_8xx) && defined(CONFIG_PCI)
338 	/* the qspan pci read routines can cause machine checks -- Cort */
339 	bad_page_fault(regs, regs->dar, SIGBUS);
340 	return;
341 #endif
342 
343 	if (debugger_fault_handler(regs)) {
344 		regs->msr |= MSR_RI;
345 		return;
346 	}
347 
348 	if (check_io_access(regs))
349 		return;
350 
351 #if defined(CONFIG_4xx) && !defined(CONFIG_440A)
352 	if (reason & ESR_IMCP) {
353 		printk("Instruction");
354 		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
355 	} else
356 		printk("Data");
357 	printk(" machine check in kernel mode.\n");
358 #elif defined(CONFIG_440A)
359 	printk("Machine check in kernel mode.\n");
360 	if (reason & ESR_IMCP){
361 		printk("Instruction Synchronous Machine Check exception\n");
362 		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
363 	}
364 	else {
365 		u32 mcsr = mfspr(SPRN_MCSR);
366 		if (mcsr & MCSR_IB)
367 			printk("Instruction Read PLB Error\n");
368 		if (mcsr & MCSR_DRB)
369 			printk("Data Read PLB Error\n");
370 		if (mcsr & MCSR_DWB)
371 			printk("Data Write PLB Error\n");
372 		if (mcsr & MCSR_TLBP)
373 			printk("TLB Parity Error\n");
374 		if (mcsr & MCSR_ICP){
375 			flush_instruction_cache();
376 			printk("I-Cache Parity Error\n");
377 		}
378 		if (mcsr & MCSR_DCSP)
379 			printk("D-Cache Search Parity Error\n");
380 		if (mcsr & MCSR_DCFP)
381 			printk("D-Cache Flush Parity Error\n");
382 		if (mcsr & MCSR_IMPE)
383 			printk("Machine Check exception is imprecise\n");
384 
385 		/* Clear MCSR */
386 		mtspr(SPRN_MCSR, mcsr);
387 	}
388 #elif defined (CONFIG_E500)
389 	printk("Machine check in kernel mode.\n");
390 	printk("Caused by (from MCSR=%lx): ", reason);
391 
392 	if (reason & MCSR_MCP)
393 		printk("Machine Check Signal\n");
394 	if (reason & MCSR_ICPERR)
395 		printk("Instruction Cache Parity Error\n");
396 	if (reason & MCSR_DCP_PERR)
397 		printk("Data Cache Push Parity Error\n");
398 	if (reason & MCSR_DCPERR)
399 		printk("Data Cache Parity Error\n");
400 	if (reason & MCSR_GL_CI)
401 		printk("Guarded Load or Cache-Inhibited stwcx.\n");
402 	if (reason & MCSR_BUS_IAERR)
403 		printk("Bus - Instruction Address Error\n");
404 	if (reason & MCSR_BUS_RAERR)
405 		printk("Bus - Read Address Error\n");
406 	if (reason & MCSR_BUS_WAERR)
407 		printk("Bus - Write Address Error\n");
408 	if (reason & MCSR_BUS_IBERR)
409 		printk("Bus - Instruction Data Error\n");
410 	if (reason & MCSR_BUS_RBERR)
411 		printk("Bus - Read Data Bus Error\n");
412 	if (reason & MCSR_BUS_WBERR)
413 		printk("Bus - Write Data Bus Error\n");
414 	if (reason & MCSR_BUS_IPERR)
415 		printk("Bus - Instruction Parity Error\n");
416 	if (reason & MCSR_BUS_RPERR)
417 		printk("Bus - Read Parity Error\n");
418 #elif defined (CONFIG_E200)
419 	printk("Machine check in kernel mode.\n");
420 	printk("Caused by (from MCSR=%lx): ", reason);
421 
422 	if (reason & MCSR_MCP)
423 		printk("Machine Check Signal\n");
424 	if (reason & MCSR_CP_PERR)
425 		printk("Cache Push Parity Error\n");
426 	if (reason & MCSR_CPERR)
427 		printk("Cache Parity Error\n");
428 	if (reason & MCSR_EXCP_ERR)
429 		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
430 	if (reason & MCSR_BUS_IRERR)
431 		printk("Bus - Read Bus Error on instruction fetch\n");
432 	if (reason & MCSR_BUS_DRERR)
433 		printk("Bus - Read Bus Error on data load\n");
434 	if (reason & MCSR_BUS_WRERR)
435 		printk("Bus - Write Bus Error on buffered store or cache line push\n");
436 #else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
437 	printk("Machine check in kernel mode.\n");
438 	printk("Caused by (from SRR1=%lx): ", reason);
439 	switch (reason & 0x601F0000) {
440 	case 0x80000:
441 		printk("Machine check signal\n");
442 		break;
443 	case 0:		/* for 601 */
444 	case 0x40000:
445 	case 0x140000:	/* 7450 MSS error and TEA */
446 		printk("Transfer error ack signal\n");
447 		break;
448 	case 0x20000:
449 		printk("Data parity error signal\n");
450 		break;
451 	case 0x10000:
452 		printk("Address parity error signal\n");
453 		break;
454 	case 0x20000000:
455 		printk("L1 Data Cache error\n");
456 		break;
457 	case 0x40000000:
458 		printk("L1 Instruction Cache error\n");
459 		break;
460 	case 0x00100000:
461 		printk("L2 data cache parity error\n");
462 		break;
463 	default:
464 		printk("Unknown values in msr\n");
465 	}
466 #endif /* CONFIG_4xx */
467 
468 	/*
469 	 * Optional platform-provided routine to print out
470 	 * additional info, e.g. bus error registers.
471 	 */
472 	platform_machine_check(regs);
473 
474 	if (debugger_fault_handler(regs))
475 		return;
476 	die("Machine check", regs, SIGBUS);
477 
478 	/* Must die if the interrupt is not recoverable */
479 	if (!(regs->msr & MSR_RI))
480 		panic("Unrecoverable Machine check");
481 }
482 
483 void SMIException(struct pt_regs *regs)
484 {
485 	die("System Management Interrupt", regs, SIGABRT);
486 }
487 
488 void unknown_exception(struct pt_regs *regs)
489 {
490 	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
491 	       regs->nip, regs->msr, regs->trap);
492 
493 	_exception(SIGTRAP, regs, 0, 0);
494 }
495 
496 void instruction_breakpoint_exception(struct pt_regs *regs)
497 {
498 	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
499 					5, SIGTRAP) == NOTIFY_STOP)
500 		return;
501 	if (debugger_iabr_match(regs))
502 		return;
503 	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
504 }
505 
506 void RunModeException(struct pt_regs *regs)
507 {
508 	_exception(SIGTRAP, regs, 0, 0);
509 }
510 
511 void __kprobes single_step_exception(struct pt_regs *regs)
512 {
513 	regs->msr &= ~(MSR_SE | MSR_BE);  /* Turn off 'trace' bits */
514 
515 	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
516 					5, SIGTRAP) == NOTIFY_STOP)
517 		return;
518 	if (debugger_sstep(regs))
519 		return;
520 
521 	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
522 }
523 
524 /*
525  * After we have successfully emulated an instruction, we have to
526  * check if the instruction was being single-stepped, and if so,
527  * pretend we got a single-step exception.  This was pointed out
528  * by Kumar Gala.  -- paulus
529  */
530 static void emulate_single_step(struct pt_regs *regs)
531 {
532 	if (single_stepping(regs)) {
533 		clear_single_step(regs);
534 		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
535 	}
536 }
537 
538 static inline int __parse_fpscr(unsigned long fpscr)
539 {
540 	int ret = 0;
541 
542 	/* Invalid operation */
543 	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
544 		ret = FPE_FLTINV;
545 
546 	/* Overflow */
547 	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
548 		ret = FPE_FLTOVF;
549 
550 	/* Underflow */
551 	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
552 		ret = FPE_FLTUND;
553 
554 	/* Divide by zero */
555 	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
556 		ret = FPE_FLTDIV;
557 
558 	/* Inexact result */
559 	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
560 		ret = FPE_FLTRES;
561 
562 	return ret;
563 }
564 
565 static void parse_fpe(struct pt_regs *regs)
566 {
567 	int code = 0;
568 
569 	flush_fp_to_thread(current);
570 
571 	code = __parse_fpscr(current->thread.fpscr.val);
572 
573 	_exception(SIGFPE, regs, code, regs->nip);
574 }
575 
576 /*
577  * Illegal instruction emulation support.  Originally written to
578  * provide the PVR to user applications via the mfspr rD, PVR instruction.
579  * Return non-zero if we can't emulate, or -EFAULT if the associated
580  * memory access caused an access fault.  Return zero on success.
581  *
582  * There are a couple of ways to do this, either "decode" the instruction
583  * or directly match lots of bits.  In this case, matching lots of
584  * bits is faster and easier.
585  *
586  */
587 #define INST_MFSPR_PVR		0x7c1f42a6
588 #define INST_MFSPR_PVR_MASK	0xfc1fffff
589 
590 #define INST_DCBA		0x7c0005ec
591 #define INST_DCBA_MASK		0xfc0007fe
592 
593 #define INST_MCRXR		0x7c000400
594 #define INST_MCRXR_MASK		0xfc0007fe
595 
596 #define INST_STRING		0x7c00042a
597 #define INST_STRING_MASK	0xfc0007fe
598 #define INST_STRING_GEN_MASK	0xfc00067e
599 #define INST_LSWI		0x7c0004aa
600 #define INST_LSWX		0x7c00042a
601 #define INST_STSWI		0x7c0005aa
602 #define INST_STSWX		0x7c00052a
603 
604 #define INST_POPCNTB		0x7c0000f4
605 #define INST_POPCNTB_MASK	0xfc0007fe
606 
607 static int emulate_string_inst(struct pt_regs *regs, u32 instword)
608 {
609 	u8 rT = (instword >> 21) & 0x1f;
610 	u8 rA = (instword >> 16) & 0x1f;
611 	u8 NB_RB = (instword >> 11) & 0x1f;
612 	u32 num_bytes;
613 	unsigned long EA;
614 	int pos = 0;
615 
616 	/* Early out if we are an invalid form of lswx */
617 	if ((instword & INST_STRING_MASK) == INST_LSWX)
618 		if ((rT == rA) || (rT == NB_RB))
619 			return -EINVAL;
620 
621 	EA = (rA == 0) ? 0 : regs->gpr[rA];
622 
623 	switch (instword & INST_STRING_MASK) {
624 		case INST_LSWX:
625 		case INST_STSWX:
626 			EA += NB_RB;
627 			num_bytes = regs->xer & 0x7f;
628 			break;
629 		case INST_LSWI:
630 		case INST_STSWI:
631 			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
632 			break;
633 		default:
634 			return -EINVAL;
635 	}
636 
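	/*
	 * Transfer the string one byte at a time: four bytes per register,
	 * first byte in the most-significant byte of the register's low
	 * 32-bit word, moving to the next register every four bytes and
	 * wrapping from r31 back to r0.
	 */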
637 	while (num_bytes != 0)
638 	{
639 		u8 val;
640 		u32 shift = 8 * (3 - (pos & 0x3));
641 
642 		switch ((instword & INST_STRING_MASK)) {
643 			case INST_LSWX:
644 			case INST_LSWI:
645 				if (get_user(val, (u8 __user *)EA))
646 					return -EFAULT;
647 				/* first time updating this reg,
648 				 * zero it out */
649 				if (pos == 0)
650 					regs->gpr[rT] = 0;
651 				regs->gpr[rT] |= val << shift;
652 				break;
653 			case INST_STSWI:
654 			case INST_STSWX:
655 				val = regs->gpr[rT] >> shift;
656 				if (put_user(val, (u8 __user *)EA))
657 					return -EFAULT;
658 				break;
659 		}
660 		/* move EA to next address */
661 		EA += 1;
662 		num_bytes--;
663 
664 		/* manage our position within the register */
665 		if (++pos == 4) {
666 			pos = 0;
667 			if (++rT == 32)
668 				rT = 0;
669 		}
670 	}
671 
672 	return 0;
673 }
674 
675 static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
676 {
677 	u32 ra,rs;
678 	unsigned long tmp;
679 
680 	ra = (instword >> 16) & 0x1f;
681 	rs = (instword >> 21) & 0x1f;
682 
683 	tmp = regs->gpr[rs];
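	/*
	 * Classic SWAR (SIMD-within-a-register) population count, kept
	 * per byte as popcntb requires: pairwise bit sums, then 4-bit
	 * sums, then a mask so each byte holds only its own bit count.
	 */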
684 	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
685 	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
686 	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
687 	regs->gpr[ra] = tmp;
688 
689 	return 0;
690 }
691 
692 static int emulate_instruction(struct pt_regs *regs)
693 {
694 	u32 instword;
695 	u32 rd;
696 
697 	if (!user_mode(regs) || (regs->msr & MSR_LE))
698 		return -EINVAL;
699 	CHECK_FULL_REGS(regs);
700 
701 	if (get_user(instword, (u32 __user *)(regs->nip)))
702 		return -EFAULT;
703 
704 	/* Emulate the mfspr rD, PVR. */
705 	if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
706 		rd = (instword >> 21) & 0x1f;
707 		regs->gpr[rd] = mfspr(SPRN_PVR);
708 		return 0;
709 	}
710 
711 	/* Emulating the dcba insn is just a no-op.  */
712 	if ((instword & INST_DCBA_MASK) == INST_DCBA)
713 		return 0;
714 
715 	/* Emulate the mcrxr insn.  */
716 	if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
717 		int shift = (instword >> 21) & 0x1c;
718 		unsigned long msk = 0xf0000000UL >> shift;
719 
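		/*
		 * mcrxr copies the top four XER bits (SO, OV, CA) into the
		 * CR field selected by the instruction and then clears them
		 * in the XER.
		 */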
720 		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
721 		regs->xer &= ~0xf0000000UL;
722 		return 0;
723 	}
724 
725 	/* Emulate load/store string insn. */
726 	if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
727 		return emulate_string_inst(regs, instword);
728 
729 	/* Emulate the popcntb (Population Count Bytes) instruction. */
730 	if ((instword & INST_POPCNTB_MASK) == INST_POPCNTB) {
731 		return emulate_popcntb_inst(regs, instword);
732 	}
733 
734 	return -EINVAL;
735 }
736 
737 int is_valid_bugaddr(unsigned long addr)
738 {
739 	return is_kernel_addr(addr);
740 }
741 
742 void __kprobes program_check_exception(struct pt_regs *regs)
743 {
744 	unsigned int reason = get_reason(regs);
745 	extern int do_mathemu(struct pt_regs *regs);
746 
747 	/* We can now get here via an FP Unavailable exception if the core
748 	 * has no FPU; in that case the reason flags will be 0 */
749 
750 	if (reason & REASON_FP) {
751 		/* IEEE FP exception */
752 		parse_fpe(regs);
753 		return;
754 	}
755 	if (reason & REASON_TRAP) {
756 		/* trap exception */
757 		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
758 				== NOTIFY_STOP)
759 			return;
760 		if (debugger_bpt(regs))
761 			return;
762 
763 		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
764 		    report_bug(regs->nip) == BUG_TRAP_TYPE_WARN) {
765 			regs->nip += 4;
766 			return;
767 		}
768 		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
769 		return;
770 	}
771 
772 	local_irq_enable();
773 
774 #ifdef CONFIG_MATH_EMULATION
775 	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
776 	 * but there seems to be a hardware bug on the 405GP (RevD)
777 	 * that means ESR is sometimes set incorrectly - either to
778 	 * ESR_DST (!?) or 0.  In the process of chasing this with the
779 	 * hardware people - not sure if it can happen on any illegal
780 	 * instruction or only on FP instructions, whether there is a
781 	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
782 	switch (do_mathemu(regs)) {
783 	case 0:
784 		emulate_single_step(regs);
785 		return;
786 	case 1: {
787 			int code = 0;
788 			code = __parse_fpscr(current->thread.fpscr.val);
789 			_exception(SIGFPE, regs, code, regs->nip);
790 			return;
791 		}
792 	case -EFAULT:
793 		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
794 		return;
795 	}
796 	/* fall through on any other errors */
797 #endif /* CONFIG_MATH_EMULATION */
798 
799 	/* Try to emulate it if we should. */
800 	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
801 		switch (emulate_instruction(regs)) {
802 		case 0:
803 			regs->nip += 4;
804 			emulate_single_step(regs);
805 			return;
806 		case -EFAULT:
807 			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
808 			return;
809 		}
810 	}
811 
812 	if (reason & REASON_PRIVILEGED)
813 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
814 	else
815 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
816 }
817 
818 void alignment_exception(struct pt_regs *regs)
819 {
820 	int sig, code, fixed = 0;
821 
822 	/* we don't implement logging of alignment exceptions */
823 	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
824 		fixed = fix_alignment(regs);
825 
826 	if (fixed == 1) {
827 		regs->nip += 4;	/* skip over emulated instruction */
828 		emulate_single_step(regs);
829 		return;
830 	}
831 
832 	/* Operand address was bad */
833 	if (fixed == -EFAULT) {
834 		sig = SIGSEGV;
835 		code = SEGV_ACCERR;
836 	} else {
837 		sig = SIGBUS;
838 		code = BUS_ADRALN;
839 	}
840 	if (user_mode(regs))
841 		_exception(sig, regs, code, regs->dar);
842 	else
843 		bad_page_fault(regs, regs->dar, sig);
844 }
845 
846 void StackOverflow(struct pt_regs *regs)
847 {
848 	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
849 	       current, regs->gpr[1]);
850 	debugger(regs);
851 	show_regs(regs);
852 	panic("kernel stack overflow");
853 }
854 
855 void nonrecoverable_exception(struct pt_regs *regs)
856 {
857 	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
858 	       regs->nip, regs->msr);
859 	debugger(regs);
860 	die("nonrecoverable exception", regs, SIGKILL);
861 }
862 
863 void trace_syscall(struct pt_regs *regs)
864 {
865 	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
866 	       current, current->pid, regs->nip, regs->link, regs->gpr[0],
867 	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
868 }
869 
870 void kernel_fp_unavailable_exception(struct pt_regs *regs)
871 {
872 	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
873 			  "%lx at %lx\n", regs->trap, regs->nip);
874 	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
875 }
876 
877 void altivec_unavailable_exception(struct pt_regs *regs)
878 {
879 	if (user_mode(regs)) {
880 		/* A user program has executed an altivec instruction,
881 		   but this kernel doesn't support altivec. */
882 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
883 		return;
884 	}
885 
886 	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
887 			"%lx at %lx\n", regs->trap, regs->nip);
888 	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
889 }
890 
891 void performance_monitor_exception(struct pt_regs *regs)
892 {
893 	perf_irq(regs);
894 }
895 
896 #ifdef CONFIG_8xx
897 void SoftwareEmulation(struct pt_regs *regs)
898 {
899 	extern int do_mathemu(struct pt_regs *);
900 	extern int Soft_emulate_8xx(struct pt_regs *);
901 	int errcode;
902 
903 	CHECK_FULL_REGS(regs);
904 
905 	if (!user_mode(regs)) {
906 		debugger(regs);
907 		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
908 	}
909 
910 #ifdef CONFIG_MATH_EMULATION
911 	errcode = do_mathemu(regs);
912 
913 	switch (errcode) {
914 	case 0:
915 		emulate_single_step(regs);
916 		return;
917 	case 1: {
918 			int code = 0;
919 			code = __parse_fpscr(current->thread.fpscr.val);
920 			_exception(SIGFPE, regs, code, regs->nip);
921 			return;
922 		}
923 	case -EFAULT:
924 		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
925 		return;
926 	default:
927 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
928 		return;
929 	}
930 
931 #else
932 	errcode = Soft_emulate_8xx(regs);
933 	switch (errcode) {
934 	case 0:
935 		emulate_single_step(regs);
936 		return;
937 	case 1:
938 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
939 		return;
940 	case -EFAULT:
941 		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
942 		return;
943 	}
944 #endif
945 }
946 #endif /* CONFIG_8xx */
947 
948 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
949 
950 void DebugException(struct pt_regs *regs, unsigned long debug_status)
951 {
952 	if (debug_status & DBSR_IC) {	/* instruction completion */
953 		regs->msr &= ~MSR_DE;
954 		if (user_mode(regs)) {
955 			current->thread.dbcr0 &= ~DBCR0_IC;
956 		} else {
957 			/* Disable instruction completion */
958 			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
959 			/* Clear the instruction completion event */
960 			mtspr(SPRN_DBSR, DBSR_IC);
961 			if (debugger_sstep(regs))
962 				return;
963 		}
964 		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
965 	}
966 }
967 #endif /* CONFIG_40x || CONFIG_BOOKE */
968 
969 #if !defined(CONFIG_TAU_INT)
970 void TAUException(struct pt_regs *regs)
971 {
972 	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
973 	       regs->nip, regs->msr, regs->trap, print_tainted());
974 }
975 #endif /* CONFIG_TAU_INT */
976 
977 #ifdef CONFIG_ALTIVEC
978 void altivec_assist_exception(struct pt_regs *regs)
979 {
980 	int err;
981 
982 	if (!user_mode(regs)) {
983 		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
984 		       " at %lx\n", regs->nip);
985 		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
986 	}
987 
988 	flush_altivec_to_thread(current);
989 
990 	err = emulate_altivec(regs);
991 	if (err == 0) {
992 		regs->nip += 4;		/* skip emulated instruction */
993 		emulate_single_step(regs);
994 		return;
995 	}
996 
997 	if (err == -EFAULT) {
998 		/* got an error reading the instruction */
999 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1000 	} else {
1001 		/* didn't recognize the instruction */
1002 		/* XXX quick hack for now: set the non-Java bit in the VSCR */
1003 		if (printk_ratelimit())
1004 			printk(KERN_ERR "Unrecognized altivec instruction "
1005 			       "in %s at %lx\n", current->comm, regs->nip);
1006 		current->thread.vscr.u[3] |= 0x10000;
1007 	}
1008 }
1009 #endif /* CONFIG_ALTIVEC */
1010 
1011 #ifdef CONFIG_FSL_BOOKE
1012 void CacheLockingException(struct pt_regs *regs, unsigned long address,
1013 			   unsigned long error_code)
1014 {
1015 	/* We treat cache locking instructions from the user
1016 	 * as priv ops; in the future we could try to do
1017 	 * something smarter
1018 	 */
1019 	if (error_code & (ESR_DLK|ESR_ILK))
1020 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1021 	return;
1022 }
1023 #endif /* CONFIG_FSL_BOOKE */
1024 
1025 #ifdef CONFIG_SPE
1026 void SPEFloatingPointException(struct pt_regs *regs)
1027 {
1028 	unsigned long spefscr;
1029 	int fpexc_mode;
1030 	int code = 0;
1031 
1032 	spefscr = current->thread.spefscr;
1033 	fpexc_mode = current->thread.fpexc_mode;
1034 
1035 	/* Hardware does not necessarily set sticky
1036 	 * underflow/overflow/invalid flags */
1037 	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
1038 		code = FPE_FLTOVF;
1039 		spefscr |= SPEFSCR_FOVFS;
1040 	}
1041 	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
1042 		code = FPE_FLTUND;
1043 		spefscr |= SPEFSCR_FUNFS;
1044 	}
1045 	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
1046 		code = FPE_FLTDIV;
1047 	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
1048 		code = FPE_FLTINV;
1049 		spefscr |= SPEFSCR_FINVS;
1050 	}
1051 	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
1052 		code = FPE_FLTRES;
1053 
1054 	current->thread.spefscr = spefscr;
1055 
1056 	_exception(SIGFPE, regs, code, regs->nip);
1057 	return;
1058 }
1059 #endif
1060 
1061 /*
1062  * We enter here if we get an unrecoverable exception, that is, one
1063  * that happened at a point where the RI (recoverable interrupt) bit
1064  * in the MSR is 0.  This indicates that SRR0/1 still held live state,
1065  * which has therefore been lost by taking this exception.
1066  */
1067 void unrecoverable_exception(struct pt_regs *regs)
1068 {
1069 	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
1070 	       regs->trap, regs->nip);
1071 	die("Unrecoverable exception", regs, SIGABRT);
1072 }
1073 
1074 #ifdef CONFIG_BOOKE_WDT
1075 /*
1076  * Default handler for a Watchdog exception,
1077  * spins until a reboot occurs
1078  */
1079 void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
1080 {
1081 	/* Generic WatchdogHandler, implement your own */
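	/*
	 * Mask further watchdog interrupts by clearing TCR[WIE]; any final
	 * watchdog action configured in the TCR (typically a reset) is
	 * presumably still taken by the hardware.
	 */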
1082 	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
1083 	return;
1084 }
1085 
1086 void WatchdogException(struct pt_regs *regs)
1087 {
1088 	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
1089 	WatchdogHandler(regs);
1090 }
1091 #endif
1092 
1093 /*
1094  * We enter here if we discover during exception entry that we are
1095  * running in supervisor mode with a userspace value in the stack pointer.
1096  */
1097 void kernel_bad_stack(struct pt_regs *regs)
1098 {
1099 	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
1100 	       regs->gpr[1], regs->nip);
1101 	die("Bad kernel stack pointer", regs, SIGABRT);
1102 }
1103 
1104 void __init trap_init(void)
1105 {
1106 }
1107