xref: /linux/arch/powerpc/kernel/traps.c (revision 424a6f6ef990b7e9f56f6627bfc6c46b493faeb4)
1 /*
2  *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
3  *  Copyright 2007-2010 Freescale Semiconductor, Inc.
4  *
5  *  This program is free software; you can redistribute it and/or
6  *  modify it under the terms of the GNU General Public License
7  *  as published by the Free Software Foundation; either version
8  *  2 of the License, or (at your option) any later version.
9  *
10  *  Modified by Cort Dougan (cort@cs.nmt.edu)
11  *  and Paul Mackerras (paulus@samba.org)
12  */
13 
14 /*
15  * This file handles the architecture-dependent parts of hardware exceptions
16  */
17 
18 #include <linux/errno.h>
19 #include <linux/sched.h>
20 #include <linux/kernel.h>
21 #include <linux/mm.h>
22 #include <linux/stddef.h>
23 #include <linux/unistd.h>
24 #include <linux/ptrace.h>
25 #include <linux/user.h>
26 #include <linux/interrupt.h>
27 #include <linux/init.h>
28 #include <linux/module.h>
29 #include <linux/prctl.h>
30 #include <linux/delay.h>
31 #include <linux/kprobes.h>
32 #include <linux/kexec.h>
33 #include <linux/backlight.h>
34 #include <linux/bug.h>
35 #include <linux/kdebug.h>
36 #include <linux/debugfs.h>
37 #include <linux/ratelimit.h>
38 
39 #include <asm/emulated_ops.h>
40 #include <asm/pgtable.h>
41 #include <asm/uaccess.h>
42 #include <asm/system.h>
43 #include <asm/io.h>
44 #include <asm/machdep.h>
45 #include <asm/rtas.h>
46 #include <asm/pmc.h>
47 #ifdef CONFIG_PPC32
48 #include <asm/reg.h>
49 #endif
50 #ifdef CONFIG_PMAC_BACKLIGHT
51 #include <asm/backlight.h>
52 #endif
53 #ifdef CONFIG_PPC64
54 #include <asm/firmware.h>
55 #include <asm/processor.h>
56 #endif
57 #include <asm/kexec.h>
58 #include <asm/ppc-opcode.h>
59 #include <asm/rio.h>
60 #include <asm/fadump.h>
61 
62 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
63 int (*__debugger)(struct pt_regs *regs) __read_mostly;
64 int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
65 int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
66 int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
67 int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
68 int (*__debugger_dabr_match)(struct pt_regs *regs) __read_mostly;
69 int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;
70 
71 EXPORT_SYMBOL(__debugger);
72 EXPORT_SYMBOL(__debugger_ipi);
73 EXPORT_SYMBOL(__debugger_bpt);
74 EXPORT_SYMBOL(__debugger_sstep);
75 EXPORT_SYMBOL(__debugger_iabr_match);
76 EXPORT_SYMBOL(__debugger_dabr_match);
77 EXPORT_SYMBOL(__debugger_fault_handler);
78 #endif
79 
80 /*
81  * Trap & Exception support
82  */
83 
84 #ifdef CONFIG_PMAC_BACKLIGHT
85 static void pmac_backlight_unblank(void)
86 {
87 	mutex_lock(&pmac_backlight_mutex);
88 	if (pmac_backlight) {
89 		struct backlight_properties *props;
90 
91 		props = &pmac_backlight->props;
92 		props->brightness = props->max_brightness;
93 		props->power = FB_BLANK_UNBLANK;
94 		backlight_update_status(pmac_backlight);
95 	}
96 	mutex_unlock(&pmac_backlight_mutex);
97 }
98 #else
99 static inline void pmac_backlight_unblank(void) { }
100 #endif
101 
/* Serialises oops/die output across CPUs (taken raw in oops_begin()). */
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;		/* CPU currently holding die_lock, -1 if none */
static unsigned int die_nest_count;	/* depth of nested oopses on the owner CPU */
static int die_counter;			/* sequence number printed as [#N] in the oops header */
106 
/*
 * Start an oops report: let an attached debugger take the trap first,
 * then acquire die_lock (tolerating nesting on the same CPU), crank up
 * console verbosity and unblank the display.
 *
 * Returns the saved IRQ flags to hand back to oops_end(), or 1 if the
 * debugger consumed the trap.
 */
static unsigned __kprobes long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	/* An attached debugger may fully handle the fault. */
	if (debugger(regs))
		return 1;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			/* Another CPU owns the lock: wait our turn. */
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	/* Make sure the oops is visible on PowerMac builtin displays. */
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}
134 
/*
 * Finish an oops report started by oops_begin(): drop die_lock once the
 * nest count reaches zero, hand the state to fadump/kexec crash paths,
 * then either return (signr == 0), panic, or kill the current task.
 *
 * @flags: IRQ flags returned by oops_begin()
 * @signr: signal to die with, or 0 to resume after a handled oops
 */
static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
			       int signr)
{
	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE);
	die_nest_count--;
	oops_exit();
	printk("\n");
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);

	crash_fadump(regs, "die oops");

	/*
	 * A system reset (0x100) is a request to dump, so we always send
	 * it through the crashdump code.
	 */
	if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
		crash_kexec(regs);

		/*
		 * We aren't the primary crash CPU. We need to send it
		 * to a holding pattern to avoid it ending up in the panic
		 * code.
		 */
		crash_kexec_secondary(regs);
	}

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}
186 
/*
 * Print the oops banner (reason, signal, kernel config summary,
 * platform name), give the die notifier chain a chance to stop the
 * report, then dump modules and registers.
 *
 * Returns 1 if a notifier consumed the event (caller should not treat
 * it as fatal), 0 otherwise.
 */
static int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC ");
#endif
#ifdef CONFIG_NUMA
	printk("NUMA ");
#endif
	printk("%s\n", ppc_md.name ? ppc_md.name : "");

	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);

	return 0;
}
212 
/*
 * Report a fatal kernel fault: print the oops via __die() and then
 * terminate through oops_end().  If a die notifier consumed the event,
 * the signal is suppressed so oops_end() returns instead of killing.
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags;

	flags = oops_begin(regs);
	if (__die(str, regs, err))
		err = 0;
	oops_end(flags, regs, err);
}
221 
222 void user_single_step_siginfo(struct task_struct *tsk,
223 				struct pt_regs *regs, siginfo_t *info)
224 {
225 	memset(info, 0, sizeof(*info));
226 	info->si_signo = SIGTRAP;
227 	info->si_code = TRAP_TRACE;
228 	info->si_addr = (void __user *)regs->nip;
229 }
230 
231 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
232 {
233 	siginfo_t info;
234 	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
235 			"at %08lx nip %08lx lr %08lx code %x\n";
236 	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
237 			"at %016lx nip %016lx lr %016lx code %x\n";
238 
239 	if (!user_mode(regs)) {
240 		die("Exception in kernel mode", regs, signr);
241 		return;
242 	}
243 
244 	if (show_unhandled_signals && unhandled_signal(current, signr)) {
245 		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
246 				   current->comm, current->pid, signr,
247 				   addr, regs->nip, regs->link, code);
248 	}
249 
250 	if (!arch_irq_disabled_regs(regs))
251 		local_irq_enable();
252 
253 	memset(&info, 0, sizeof(info));
254 	info.si_signo = signr;
255 	info.si_code = code;
256 	info.si_addr = (void __user *) addr;
257 	force_sig_info(signr, &info, current);
258 }
259 
260 #ifdef CONFIG_PPC64
261 void system_reset_exception(struct pt_regs *regs)
262 {
263 	/* See if any machine dependent calls */
264 	if (ppc_md.system_reset_exception) {
265 		if (ppc_md.system_reset_exception(regs))
266 			return;
267 	}
268 
269 	die("System Reset", regs, SIGABRT);
270 
271 	/* Must die if the interrupt is not recoverable */
272 	if (!(regs->msr & MSR_RI))
273 		panic("Unrecoverable System Reset");
274 
275 	/* What should we do here? We could issue a shutdown or hard reset. */
276 }
277 #endif
278 
279 /*
280  * I/O accesses can cause machine checks on powermacs.
281  * Check if the NIP corresponds to the address of a sync
282  * instruction for which there is an entry in the exception
283  * table.
284  * Note that the 601 only takes a machine check on TEA
285  * (transfer error ack) signal assertion, and does not
286  * set any of the top 16 bits of SRR1.
287  *  -- paulus.
288  */
/*
 * Returns 1 (and fixes up regs to resume at the exception-table fixup)
 * when the machine check was caused by a faulting I/O access inside
 * the inb/inw/inl-style accessors; 0 otherwise.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == 0x60000000)		/* nop */
			nip -= 2;
		else if (*nip == 0x4c00012c)	/* isync */
			--nip;
		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
			/* sync or twi */
			unsigned int rb;

			/* Back up to the load/store itself to decode
			 * the port register operand for the message. */
			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			/* Mark recoverable and resume at the fixup. */
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}
327 
328 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
329 /* On 4xx, the reason for the machine check or program exception
330    is in the ESR. */
331 #define get_reason(regs)	((regs)->dsisr)
332 #ifndef CONFIG_FSL_BOOKE
333 #define get_mc_reason(regs)	((regs)->dsisr)
334 #else
335 #define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
336 #endif
337 #define REASON_FP		ESR_FP
338 #define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
339 #define REASON_PRIVILEGED	ESR_PPR
340 #define REASON_TRAP		ESR_PTR
341 
342 /* single-step stuff */
343 #define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
344 #define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)
345 
346 #else
347 /* On non-4xx, the reason for the machine check or program
348    exception is in the MSR. */
349 #define get_reason(regs)	((regs)->msr)
350 #define get_mc_reason(regs)	((regs)->msr)
351 #define REASON_FP		0x100000
352 #define REASON_ILLEGAL		0x80000
353 #define REASON_PRIVILEGED	0x40000
354 #define REASON_TRAP		0x20000
355 
356 #define single_stepping(regs)	((regs)->msr & MSR_SE)
357 #define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
358 #endif
359 
360 #if defined(CONFIG_4xx)
361 int machine_check_4xx(struct pt_regs *regs)
362 {
363 	unsigned long reason = get_mc_reason(regs);
364 
365 	if (reason & ESR_IMCP) {
366 		printk("Instruction");
367 		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
368 	} else
369 		printk("Data");
370 	printk(" machine check in kernel mode.\n");
371 
372 	return 0;
373 }
374 
375 int machine_check_440A(struct pt_regs *regs)
376 {
377 	unsigned long reason = get_mc_reason(regs);
378 
379 	printk("Machine check in kernel mode.\n");
380 	if (reason & ESR_IMCP){
381 		printk("Instruction Synchronous Machine Check exception\n");
382 		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
383 	}
384 	else {
385 		u32 mcsr = mfspr(SPRN_MCSR);
386 		if (mcsr & MCSR_IB)
387 			printk("Instruction Read PLB Error\n");
388 		if (mcsr & MCSR_DRB)
389 			printk("Data Read PLB Error\n");
390 		if (mcsr & MCSR_DWB)
391 			printk("Data Write PLB Error\n");
392 		if (mcsr & MCSR_TLBP)
393 			printk("TLB Parity Error\n");
394 		if (mcsr & MCSR_ICP){
395 			flush_instruction_cache();
396 			printk("I-Cache Parity Error\n");
397 		}
398 		if (mcsr & MCSR_DCSP)
399 			printk("D-Cache Search Parity Error\n");
400 		if (mcsr & MCSR_DCFP)
401 			printk("D-Cache Flush Parity Error\n");
402 		if (mcsr & MCSR_IMPE)
403 			printk("Machine Check exception is imprecise\n");
404 
405 		/* Clear MCSR */
406 		mtspr(SPRN_MCSR, mcsr);
407 	}
408 	return 0;
409 }
410 
411 int machine_check_47x(struct pt_regs *regs)
412 {
413 	unsigned long reason = get_mc_reason(regs);
414 	u32 mcsr;
415 
416 	printk(KERN_ERR "Machine check in kernel mode.\n");
417 	if (reason & ESR_IMCP) {
418 		printk(KERN_ERR
419 		       "Instruction Synchronous Machine Check exception\n");
420 		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
421 		return 0;
422 	}
423 	mcsr = mfspr(SPRN_MCSR);
424 	if (mcsr & MCSR_IB)
425 		printk(KERN_ERR "Instruction Read PLB Error\n");
426 	if (mcsr & MCSR_DRB)
427 		printk(KERN_ERR "Data Read PLB Error\n");
428 	if (mcsr & MCSR_DWB)
429 		printk(KERN_ERR "Data Write PLB Error\n");
430 	if (mcsr & MCSR_TLBP)
431 		printk(KERN_ERR "TLB Parity Error\n");
432 	if (mcsr & MCSR_ICP) {
433 		flush_instruction_cache();
434 		printk(KERN_ERR "I-Cache Parity Error\n");
435 	}
436 	if (mcsr & MCSR_DCSP)
437 		printk(KERN_ERR "D-Cache Search Parity Error\n");
438 	if (mcsr & PPC47x_MCSR_GPR)
439 		printk(KERN_ERR "GPR Parity Error\n");
440 	if (mcsr & PPC47x_MCSR_FPR)
441 		printk(KERN_ERR "FPR Parity Error\n");
442 	if (mcsr & PPC47x_MCSR_IPR)
443 		printk(KERN_ERR "Machine Check exception is imprecise\n");
444 
445 	/* Clear MCSR */
446 	mtspr(SPRN_MCSR, mcsr);
447 
448 	return 0;
449 }
450 #elif defined(CONFIG_E500)
/*
 * e500mc machine check handler: decode MCSR, attempt recovery for the
 * causes that allow it (RIO load errors, i-cache parity, write-shadowed
 * d-cache parity), and report everything else.
 *
 * Returns nonzero only if all reported causes were recoverable and the
 * MCSR reads back clean after acknowledgement.
 */
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	/* A load error may be a RapidIO access the RIO layer can fix up;
	 * if so, skip all the logging below. */
	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		printk("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		printk("Data Cache Parity Error\n");

		/*
		 * In write shadow mode we auto-recover from the error, but it
		 * may still get logged and cause a machine check.  We should
		 * only treat the non-write shadow case as non-recoverable.
		 */
		if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
			recoverable = 0;
	}

	if (reason & MCSR_L2MMU_MHIT) {
		printk("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		printk("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		printk("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		printk("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		printk("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		printk("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		printk("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		printk("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		printk("Machine Check %s Address: %#llx\n",
		       reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}

silent_out:
	/* Acknowledge the causes; success requires MCSR to read clean. */
	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}
549 
550 int machine_check_e500(struct pt_regs *regs)
551 {
552 	unsigned long reason = get_mc_reason(regs);
553 
554 	if (reason & MCSR_BUS_RBERR) {
555 		if (fsl_rio_mcheck_exception(regs))
556 			return 1;
557 	}
558 
559 	printk("Machine check in kernel mode.\n");
560 	printk("Caused by (from MCSR=%lx): ", reason);
561 
562 	if (reason & MCSR_MCP)
563 		printk("Machine Check Signal\n");
564 	if (reason & MCSR_ICPERR)
565 		printk("Instruction Cache Parity Error\n");
566 	if (reason & MCSR_DCP_PERR)
567 		printk("Data Cache Push Parity Error\n");
568 	if (reason & MCSR_DCPERR)
569 		printk("Data Cache Parity Error\n");
570 	if (reason & MCSR_BUS_IAERR)
571 		printk("Bus - Instruction Address Error\n");
572 	if (reason & MCSR_BUS_RAERR)
573 		printk("Bus - Read Address Error\n");
574 	if (reason & MCSR_BUS_WAERR)
575 		printk("Bus - Write Address Error\n");
576 	if (reason & MCSR_BUS_IBERR)
577 		printk("Bus - Instruction Data Error\n");
578 	if (reason & MCSR_BUS_RBERR)
579 		printk("Bus - Read Data Bus Error\n");
580 	if (reason & MCSR_BUS_WBERR)
581 		printk("Bus - Read Data Bus Error\n");
582 	if (reason & MCSR_BUS_IPERR)
583 		printk("Bus - Instruction Parity Error\n");
584 	if (reason & MCSR_BUS_RPERR)
585 		printk("Bus - Read Parity Error\n");
586 
587 	return 0;
588 }
589 
/* E500 builds: no generic handling; report "not recovered". */
int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
594 #elif defined(CONFIG_E200)
595 int machine_check_e200(struct pt_regs *regs)
596 {
597 	unsigned long reason = get_mc_reason(regs);
598 
599 	printk("Machine check in kernel mode.\n");
600 	printk("Caused by (from MCSR=%lx): ", reason);
601 
602 	if (reason & MCSR_MCP)
603 		printk("Machine Check Signal\n");
604 	if (reason & MCSR_CP_PERR)
605 		printk("Cache Push Parity Error\n");
606 	if (reason & MCSR_CPERR)
607 		printk("Cache Parity Error\n");
608 	if (reason & MCSR_EXCP_ERR)
609 		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
610 	if (reason & MCSR_BUS_IRERR)
611 		printk("Bus - Read Bus Error on instruction fetch\n");
612 	if (reason & MCSR_BUS_DRERR)
613 		printk("Bus - Read Bus Error on data load\n");
614 	if (reason & MCSR_BUS_WRERR)
615 		printk("Bus - Write Bus Error on buffered store or cache line push\n");
616 
617 	return 0;
618 }
619 #else
/*
 * Generic (classic PPC) machine check handler: decode the cause from
 * the saved SRR1 bits.  The magic masks below select the SRR1
 * machine-check cause field; exact bit meanings vary per core —
 * see the per-case comments.  Always returns 0 (not recovered).
 */
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		printk("Machine check signal\n");
		break;
	case 0:		/* for 601 */
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		printk("Transfer error ack signal\n");
		break;
	case 0x20000:
		printk("Data parity error signal\n");
		break;
	case 0x10000:
		printk("Address parity error signal\n");
		break;
	case 0x20000000:
		printk("L1 Data Cache error\n");
		break;
	case 0x40000000:
		printk("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		printk("L2 data cache parity error\n");
		break;
	default:
		printk("Unknown values in msr\n");
	}
	return 0;
}
655 #endif /* everything else */
656 
/*
 * Top-level machine check dispatcher: count the event, give the
 * platform then the CPU-specific handler a chance to recover, and die
 * if nothing did.
 */
void machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;

	__get_cpu_var(irq_stat).mce_exceptions++;

	/* See if any machine dependent calls. In theory, we would want
	 * to call the CPU first, and call the ppc_md. one if the CPU
	 * one returns a positive number. However there is existing code
	 * that assumes the board gets a first chance, so let's keep it
	 * that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		return;

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
	/* the qspan pci read routines can cause machine checks -- Cort
	 *
	 * yuck !!! that totally needs to go away ! There are better ways
	 * to deal with that than having a wart in the mcheck handler.
	 * -- BenH
	 */
	bad_page_fault(regs, regs->dar, SIGBUS);
	return;
#endif

	if (debugger_fault_handler(regs))
		return;

	/* Faulting I/O accessors have exception-table fixups. */
	if (check_io_access(regs))
		return;

	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable Machine check");
}
700 
/* System Management Interrupt: always fatal in the kernel. */
void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}
705 
/*
 * Catch-all for exception vectors with no dedicated handler: log the
 * trap location and raise SIGTRAP on the current task (fatal in
 * kernel mode via _exception()).
 */
void unknown_exception(struct pt_regs *regs)
{
	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);
}
713 
/*
 * IABR (instruction address breakpoint) hit: offer it to the die
 * notifier chain and an attached debugger, otherwise deliver a
 * SIGTRAP/TRAP_BRKPT at the breakpoint address.
 */
void instruction_breakpoint_exception(struct pt_regs *regs)
{
	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_iabr_match(regs))
		return;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
}
723 
/* Run-mode exception: reported as a plain SIGTRAP. */
void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}
728 
/*
 * Single-step (trace) exception: disarm single-stepping first so the
 * handler itself does not re-trap, let notifiers/debugger claim it,
 * then deliver SIGTRAP/TRAP_TRACE at the traced instruction.
 */
void __kprobes single_step_exception(struct pt_regs *regs)
{
	clear_single_step(regs);

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_sstep(regs))
		return;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
}
741 
742 /*
743  * After we have successfully emulated an instruction, we have to
744  * check if the instruction was being single-stepped, and if so,
745  * pretend we got a single-step exception.  This was pointed out
746  * by Kumar Gala.  -- paulus
747  */
static void emulate_single_step(struct pt_regs *regs)
{
	/* Nothing to do unless the emulated instruction was being traced. */
	if (!single_stepping(regs))
		return;

	single_step_exception(regs);
}
753 
754 static inline int __parse_fpscr(unsigned long fpscr)
755 {
756 	int ret = 0;
757 
758 	/* Invalid operation */
759 	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
760 		ret = FPE_FLTINV;
761 
762 	/* Overflow */
763 	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
764 		ret = FPE_FLTOVF;
765 
766 	/* Underflow */
767 	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
768 		ret = FPE_FLTUND;
769 
770 	/* Divide by zero */
771 	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
772 		ret = FPE_FLTDIV;
773 
774 	/* Inexact result */
775 	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
776 		ret = FPE_FLTRES;
777 
778 	return ret;
779 }
780 
781 static void parse_fpe(struct pt_regs *regs)
782 {
783 	int code = 0;
784 
785 	flush_fp_to_thread(current);
786 
787 	code = __parse_fpscr(current->thread.fpscr.val);
788 
789 	_exception(SIGFPE, regs, code, regs->nip);
790 }
791 
792 /*
793  * Illegal instruction emulation support.  Originally written to
794  * provide the PVR to user applications using the mfspr rd, PVR.
795  * Return non-zero if we can't emulate, or -EFAULT if the associated
796  * memory access caused an access fault.  Return zero on success.
797  *
798  * There are a couple of ways to do this, either "decode" the instruction
799  * or directly match lots of bits.  In this case, matching lots of
800  * bits is faster and easier.
801  *
802  */
/*
 * Emulate the lswi/lswx/stswi/stswx string instructions one byte at a
 * time through user-space accessors.
 *
 * Returns 0 on success, -EINVAL for an unrecognised or invalid form,
 * -EFAULT if the user memory access faulted.
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;		/* byte position within the current register */

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	/* rA == 0 means a literal zero base, per the usual RA convention. */
	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
		case PPC_INST_LSWX:
		case PPC_INST_STSWX:
			/* X-forms: EA adds RB, count comes from XER[25:31]. */
			EA += NB_RB;
			num_bytes = regs->xer & 0x7f;
			break;
		case PPC_INST_LSWI:
		case PPC_INST_STSWI:
			/* I-forms: NB field, with 0 encoding 32 bytes. */
			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
			break;
		default:
			return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		/* Bytes fill each register from the most significant end. */
		u32 shift = 8 * (3 - (pos & 0x3));

		switch ((instword & PPC_INST_STRING_MASK)) {
			case PPC_INST_LSWX:
			case PPC_INST_LSWI:
				if (get_user(val, (u8 __user *)EA))
					return -EFAULT;
				/* first time updating this reg,
				 * zero it out */
				if (pos == 0)
					regs->gpr[rT] = 0;
				regs->gpr[rT] |= val << shift;
				break;
			case PPC_INST_STSWI:
			case PPC_INST_STSWX:
				val = regs->gpr[rT] >> shift;
				if (put_user(val, (u8 __user *)EA))
					return -EFAULT;
				break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			/* Register numbers wrap from 31 back to 0. */
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}
870 
871 static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
872 {
873 	u32 ra,rs;
874 	unsigned long tmp;
875 
876 	ra = (instword >> 16) & 0x1f;
877 	rs = (instword >> 21) & 0x1f;
878 
879 	tmp = regs->gpr[rs];
880 	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
881 	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
882 	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
883 	regs->gpr[ra] = tmp;
884 
885 	return 0;
886 }
887 
888 static int emulate_isel(struct pt_regs *regs, u32 instword)
889 {
890 	u8 rT = (instword >> 21) & 0x1f;
891 	u8 rA = (instword >> 16) & 0x1f;
892 	u8 rB = (instword >> 11) & 0x1f;
893 	u8 BC = (instword >> 6) & 0x1f;
894 	u8 bit;
895 	unsigned long tmp;
896 
897 	tmp = (rA == 0) ? 0 : regs->gpr[rA];
898 	bit = (regs->ccr >> (31 - BC)) & 0x1;
899 
900 	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];
901 
902 	return 0;
903 }
904 
/*
 * Try to emulate the user-mode instruction at regs->nip.
 *
 * Returns 0 on successful emulation (caller advances NIP), -EFAULT if
 * reading the instruction or its memory operands faulted, -EINVAL if
 * the instruction is not one we emulate.
 */
static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	/* Only user-mode, big-endian instructions are emulated here. */
	if (!user_mode(regs) || (regs->msr & MSR_LE))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn.  */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		/* Copy XER[0:3] into the selected CR field, then clear them. */
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if (((instword & PPC_INST_MFSPR_DSCR_MASK) == PPC_INST_MFSPR_DSCR) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if (((instword & PPC_INST_MTSPR_DSCR_MASK) == PPC_INST_MTSPR_DSCR) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		mtspr(SPRN_DSCR, regs->gpr[rd]);
		/* Children of this task should inherit the custom DSCR. */
		current->thread.dscr_inherit = 1;
		return 0;
	}
#endif

	return -EINVAL;
}
982 
/* BUG()/WARN() table lookup sanity check: only kernel addresses qualify. */
int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}
987 
/*
 * Program check exception: dispatch on the reason bits to FP exception
 * handling, trap/breakpoint handling, math emulation, or instruction
 * emulation; anything left over becomes SIGILL.
 */
void __kprobes program_check_exception(struct pt_regs *regs)
{
	unsigned int reason = get_reason(regs);
	extern int do_mathemu(struct pt_regs *regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		return;
	}
	if (reason & REASON_TRAP) {
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			return;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			return;

		/* A kernel-mode trap may be a WARN(); skip the trap insn. */
		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			return;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		return;
	}

	local_irq_enable();

#ifdef CONFIG_MATH_EMULATION
	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
	switch (do_mathemu(regs)) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1: {
			/* Emulated FP op raised an enabled FP exception. */
			int code = 0;
			code = __parse_fpscr(current->thread.fpscr.val);
			_exception(SIGFPE, regs, code, regs->nip);
			return;
		}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	}
	/* fall through on any other errors */
#endif /* CONFIG_MATH_EMULATION */

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			return;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			return;
		}
	}

	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
1066 
/*
 * Alignment exception: try to fix up the unaligned access in software
 * (unless the task asked for SIGBUS via PR_UNALIGN_SIGBUS); otherwise
 * raise SIGSEGV/SIGBUS on the operand address.
 */
void alignment_exception(struct pt_regs *regs)
{
	int sig, code, fixed = 0;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		return;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);
}
1094 
/*
 * Fatal: the kernel stack pointer (r1) has run off the end of the
 * current task's stack.  Report, give the debugger a chance, dump
 * registers and panic.
 */
void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}
1103 
/*
 * Report an exception taken in a state we cannot recover from, give
 * the debugger a chance to look, then die.
 */
void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}
1111 
1112 void trace_syscall(struct pt_regs *regs)
1113 {
1114 	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
1115 	       current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
1116 	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
1117 }
1118 
/*
 * An FP-unavailable exception in kernel mode is always fatal: the
 * kernel should never execute FP instructions with MSR[FP] clear.
 */
void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
}
1125 
1126 void altivec_unavailable_exception(struct pt_regs *regs)
1127 {
1128 	if (user_mode(regs)) {
1129 		/* A user program has executed an altivec instruction,
1130 		   but this kernel doesn't support altivec. */
1131 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1132 		return;
1133 	}
1134 
1135 	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
1136 			"%lx at %lx\n", regs->trap, regs->nip);
1137 	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
1138 }
1139 
1140 void vsx_unavailable_exception(struct pt_regs *regs)
1141 {
1142 	if (user_mode(regs)) {
1143 		/* A user program has executed an vsx instruction,
1144 		   but this kernel doesn't support vsx. */
1145 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1146 		return;
1147 	}
1148 
1149 	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
1150 			"%lx at %lx\n", regs->trap, regs->nip);
1151 	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
1152 }
1153 
/*
 * Performance monitor interrupt: account it in the per-cpu PMU
 * interrupt statistic and dispatch to the registered PMU handler.
 */
void performance_monitor_exception(struct pt_regs *regs)
{
	__get_cpu_var(irq_stat).pmu_irqs++;

	perf_irq(regs);
}
1160 
1161 #ifdef CONFIG_8xx
/*
 * 8xx software FP emulation trap.  Depending on configuration we run
 * the full math emulator (do_mathemu), the minimal 8xx emulator
 * (Soft_emulate_8xx), or simply send SIGILL.  FP instructions in
 * kernel mode are always fatal here.
 */
void SoftwareEmulation(struct pt_regs *regs)
{
	extern int do_mathemu(struct pt_regs *);
	extern int Soft_emulate_8xx(struct pt_regs *);
#if defined(CONFIG_MATH_EMULATION) || defined(CONFIG_8XX_MINIMAL_FPEMU)
	int errcode;
#endif

	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
	}

#ifdef CONFIG_MATH_EMULATION
	/* Count only attempts the emulator didn't outright fail on. */
	errcode = do_mathemu(regs);
	if (errcode >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (errcode) {
	case 0:
		/* Emulated successfully; honour single-step if pending. */
		emulate_single_step(regs);
		return;
	case 1: {
			/* Emulator raised an FP exception: translate the
			 * FPSCR state into an si_code for SIGFPE. */
			int code = 0;
			code = __parse_fpscr(current->thread.fpscr.val);
			_exception(SIGFPE, regs, code, regs->nip);
			return;
		}
	case -EFAULT:
		/* Couldn't read the instruction word. */
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	default:
		/* Unrecognized instruction. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
	errcode = Soft_emulate_8xx(regs);
	if (errcode >= 0)
		PPC_WARN_EMULATED(8xx, regs);

	switch (errcode) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1:
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	}
#else
	/* No FP emulation configured: the instruction is illegal. */
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
#endif
}
1220 #endif /* CONFIG_8xx */
1221 
1222 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Handle a BookE hardware debug event: work out which data/instruction
 * address comparator (DAC1/2, IAC1-4) fired, clear its enable bits in
 * the thread's debug control state, and deliver a TRAP_HWBKPT trap
 * identifying the comparator.  Finally decide whether MSR[DE] should
 * be re-enabled for the return from exception.
 */
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	/* Set when thread.dbcr0 was modified and must be written back. */
	int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler. Torez
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		/* A DAC range is defined by both DACs; tear the pair down. */
		current->thread.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
			     5);
		changed |= 0x01;
	}  else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
			     6);
		changed |= 0x01;
	}  else if (debug_status & DBSR_IAC1) {
		current->thread.dbcr0 &= ~DBCR0_IAC1;
		/* IAC1 may be half of an IAC1/IAC2 range; clear range mode. */
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
			     1);
		changed |= 0x01;
	}  else if (debug_status & DBSR_IAC2) {
		current->thread.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
			     2);
		changed |= 0x01;
	}  else if (debug_status & DBSR_IAC3) {
		current->thread.dbcr0 &= ~DBCR0_IAC3;
		/* IAC3 may be half of an IAC3/IAC4 range; clear range mode. */
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
			     3);
		changed |= 0x01;
	}  else if (debug_status & DBSR_IAC4) {
		current->thread.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
			     4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1))
		regs->msr |= MSR_DE;
	else
		/* Make sure the IDM flag is off */
		current->thread.dbcr0 &= ~DBCR0_IDM;

	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.dbcr0);
}
1280 
/*
 * Raw BookE debug interrupt handler.  Records DBSR in the thread
 * struct, special-cases branch-taken (DBSR_BT) and instruction
 * complete/single-step (DBSR_IC) events, and hands any other debug
 * event to handle_debug().
 */
void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	/* Save the raw status for debug-state consumers (e.g. ptrace). */
	current->thread.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_BT;
			current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) { 	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		/* Give kernel debuggers first crack at the event. */
		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			/* Re-enable MSR[DE] only if other debug events are
			 * still armed for this thread. */
			current->thread.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0,
					       current->thread.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
1342 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1343 
#if !defined(CONFIG_TAU_INT)
/*
 * Default Thermal Assist Unit interrupt handler, used when the TAU
 * driver (CONFIG_TAU_INT) is not built in: just log the event.
 */
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */
1351 
1352 #ifdef CONFIG_ALTIVEC
1353 void altivec_assist_exception(struct pt_regs *regs)
1354 {
1355 	int err;
1356 
1357 	if (!user_mode(regs)) {
1358 		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
1359 		       " at %lx\n", regs->nip);
1360 		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
1361 	}
1362 
1363 	flush_altivec_to_thread(current);
1364 
1365 	PPC_WARN_EMULATED(altivec, regs);
1366 	err = emulate_altivec(regs);
1367 	if (err == 0) {
1368 		regs->nip += 4;		/* skip emulated instruction */
1369 		emulate_single_step(regs);
1370 		return;
1371 	}
1372 
1373 	if (err == -EFAULT) {
1374 		/* got an error reading the instruction */
1375 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1376 	} else {
1377 		/* didn't recognize the instruction */
1378 		/* XXX quick hack for now: set the non-Java bit in the VSCR */
1379 		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
1380 				   "in %s at %lx\n", current->comm, regs->nip);
1381 		current->thread.vscr.u[3] |= 0x10000;
1382 	}
1383 }
1384 #endif /* CONFIG_ALTIVEC */
1385 
1386 #ifdef CONFIG_VSX
/*
 * VSX assist exception.  We don't attempt any emulation here: log the
 * event and send SIGILL.  Kernel-mode occurrences are fatal.
 */
void vsx_assist_exception(struct pt_regs *regs)
{
	if (!user_mode(regs)) {
		printk(KERN_EMERG "VSX assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VSX assist exception", regs, SIGILL);
	}

	/* Make sure the thread struct holds the live VSX state. */
	flush_vsx_to_thread(current);
	printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip);
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
1399 #endif /* CONFIG_VSX */
1400 
1401 #ifdef CONFIG_FSL_BOOKE
1402 void CacheLockingException(struct pt_regs *regs, unsigned long address,
1403 			   unsigned long error_code)
1404 {
1405 	/* We treat cache locking instructions from the user
1406 	 * as priv ops, in the future we could try to do
1407 	 * something smarter
1408 	 */
1409 	if (error_code & (ESR_DLK|ESR_ILK))
1410 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1411 	return;
1412 }
1413 #endif /* CONFIG_FSL_BOOKE */
1414 
1415 #ifdef CONFIG_SPE
/*
 * SPE floating-point exception.  Derive an si_code from the SPEFSCR
 * status bits gated by the task's prctl FP-exception mode, then run
 * the software emulator; deliver SIGSEGV/SIGFPE or log according to
 * the emulator's result.
 */
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;
	int err;

	/* Make sure the thread struct holds the live SPE state. */
	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	/* Map the first matching enabled SPEFSCR condition to a SIGFPE
	 * si_code.  NOTE(review): if no condition matches, code stays 0
	 * and SIGFPE may be sent with si_code 0 below - confirm whether
	 * a default code is wanted. */
	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
	}
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
	}
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
	}
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	err = do_spe_mathemu(regs);
	if (err == 0) {
		/* Emulated OK: skip the instruction, honour single-step. */
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}

	return;
}
1463 
/*
 * SPE floating-point round exception: the hardware could not complete
 * the rounding of a result.  We rewind nip by 4 (presumably the
 * exception is reported on the instruction after the one needing the
 * fixup - TODO confirm against the e500 core manual) and let
 * speround_handler() finish the operation.
 */
void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	/* Push any live SPE register state back to the thread struct
	 * before the handler inspects it. */
	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, 0, regs->nip);
		return;
	}
}
1494 #endif
1495 
1496 /*
1497  * We enter here if we get an unrecoverable exception, that is, one
1498  * that happened at a point where the RI (recoverable interrupt) bit
1499  * in the MSR is 0.  This indicates that SRR0/1 are live, and that
1500  * we therefore lost state by taking this exception.
1501  */
1502 void unrecoverable_exception(struct pt_regs *regs)
1503 {
1504 	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
1505 	       regs->trap, regs->nip);
1506 	die("Unrecoverable exception", regs, SIGABRT);
1507 }
1508 
1509 #ifdef CONFIG_BOOKE_WDT
1510 /*
1511  * Default handler for a Watchdog exception,
1512  * spins until a reboot occurs
1513  */
1514 void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
1515 {
1516 	/* Generic WatchdogHandler, implement your own */
1517 	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
1518 	return;
1519 }
1520 
1521 void WatchdogException(struct pt_regs *regs)
1522 {
1523 	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
1524 	WatchdogHandler(regs);
1525 }
1526 #endif
1527 
1528 /*
1529  * We enter here if we discover during exception entry that we are
1530  * running in supervisor mode with a userspace value in the stack pointer.
1531  */
1532 void kernel_bad_stack(struct pt_regs *regs)
1533 {
1534 	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
1535 	       regs->gpr[1], regs->nip);
1536 	die("Bad kernel stack pointer", regs, SIGABRT);
1537 }
1538 
void __init trap_init(void)
{
	/* Nothing to do at trap-init time on powerpc. */
}
1542 
1543 
1544 #ifdef CONFIG_PPC_EMULATED_STATS
1545 
/* Expand to a designated initializer for one emulated-instruction
 * counter entry, using the field name as its debugfs name. */
#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }
1547 
/* Per-instruction-type emulation counters, exposed one file each
 * under debugfs emulated_instructions/ (see ppc_warn_emulated_init). */
struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
	WARN_EMULATED_SETUP(8xx),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
#endif
};
1576 
/* Writable via debugfs "do_warn"; presumably gates the per-emulation
 * warning in PPC_WARN_EMULATED - see asm/emulated_ops.h to confirm. */
u32 ppc_warn_emulated;
1578 
/* Rate-limited warning that the current task used an emulated
 * instruction of the given @type. */
void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}
1584 
1585 static int __init ppc_warn_emulated_init(void)
1586 {
1587 	struct dentry *dir, *d;
1588 	unsigned int i;
1589 	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
1590 
1591 	if (!powerpc_debugfs_root)
1592 		return -ENODEV;
1593 
1594 	dir = debugfs_create_dir("emulated_instructions",
1595 				 powerpc_debugfs_root);
1596 	if (!dir)
1597 		return -ENOMEM;
1598 
1599 	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
1600 			       &ppc_warn_emulated);
1601 	if (!d)
1602 		goto fail;
1603 
1604 	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
1605 		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
1606 				       (u32 *)&entries[i].val.counter);
1607 		if (!d)
1608 			goto fail;
1609 	}
1610 
1611 	return 0;
1612 
1613 fail:
1614 	debugfs_remove_recursive(dir);
1615 	return -ENOMEM;
1616 }
1617 
1618 device_initcall(ppc_warn_emulated_init);
1619 
1620 #endif /* CONFIG_PPC_EMULATED_STATS */
1621