xref: /linux/arch/powerpc/kernel/traps.c (revision 98c45f51f767bfdd71d773cceaceb403352e51ae)
1 /*
2  *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
3  *  Copyright 2007-2010 Freescale Semiconductor, Inc.
4  *
5  *  This program is free software; you can redistribute it and/or
6  *  modify it under the terms of the GNU General Public License
7  *  as published by the Free Software Foundation; either version
8  *  2 of the License, or (at your option) any later version.
9  *
10  *  Modified by Cort Dougan (cort@cs.nmt.edu)
11  *  and Paul Mackerras (paulus@samba.org)
12  */
13 
14 /*
15  * This file handles the architecture-dependent parts of hardware exceptions
16  */
17 
18 #include <linux/errno.h>
19 #include <linux/sched.h>
20 #include <linux/sched/debug.h>
21 #include <linux/kernel.h>
22 #include <linux/mm.h>
23 #include <linux/pkeys.h>
24 #include <linux/stddef.h>
25 #include <linux/unistd.h>
26 #include <linux/ptrace.h>
27 #include <linux/user.h>
28 #include <linux/interrupt.h>
29 #include <linux/init.h>
30 #include <linux/extable.h>
31 #include <linux/module.h>	/* print_modules */
32 #include <linux/prctl.h>
33 #include <linux/delay.h>
34 #include <linux/kprobes.h>
35 #include <linux/kexec.h>
36 #include <linux/backlight.h>
37 #include <linux/bug.h>
38 #include <linux/kdebug.h>
39 #include <linux/ratelimit.h>
40 #include <linux/context_tracking.h>
41 #include <linux/smp.h>
42 #include <linux/console.h>
43 #include <linux/kmsg_dump.h>
44 
45 #include <asm/emulated_ops.h>
46 #include <asm/pgtable.h>
47 #include <linux/uaccess.h>
48 #include <asm/debugfs.h>
49 #include <asm/io.h>
50 #include <asm/machdep.h>
51 #include <asm/rtas.h>
52 #include <asm/pmc.h>
53 #include <asm/reg.h>
54 #ifdef CONFIG_PMAC_BACKLIGHT
55 #include <asm/backlight.h>
56 #endif
57 #ifdef CONFIG_PPC64
58 #include <asm/firmware.h>
59 #include <asm/processor.h>
60 #include <asm/tm.h>
61 #endif
62 #include <asm/kexec.h>
63 #include <asm/ppc-opcode.h>
64 #include <asm/rio.h>
65 #include <asm/fadump.h>
66 #include <asm/switch_to.h>
67 #include <asm/tm.h>
68 #include <asm/debug.h>
69 #include <asm/asm-prototypes.h>
70 #include <asm/hmi.h>
71 #include <sysdev/fsl_pci.h>
72 #include <asm/kprobes.h>
73 #include <asm/stacktrace.h>
74 
75 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
76 int (*__debugger)(struct pt_regs *regs) __read_mostly;
77 int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
78 int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
79 int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
80 int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
81 int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
82 int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;
83 
84 EXPORT_SYMBOL(__debugger);
85 EXPORT_SYMBOL(__debugger_ipi);
86 EXPORT_SYMBOL(__debugger_bpt);
87 EXPORT_SYMBOL(__debugger_sstep);
88 EXPORT_SYMBOL(__debugger_iabr_match);
89 EXPORT_SYMBOL(__debugger_break_match);
90 EXPORT_SYMBOL(__debugger_fault_handler);
91 #endif
92 
93 /* Transactional Memory trap debug */
94 #ifdef TM_DEBUG_SW
95 #define TM_DEBUG(x...) printk(KERN_INFO x)
96 #else
97 #define TM_DEBUG(x...) do { } while(0)
98 #endif
99 
100 static const char *signame(int signr)
101 {
102 	switch (signr) {
103 	case SIGBUS:	return "bus error";
104 	case SIGFPE:	return "floating point exception";
105 	case SIGILL:	return "illegal instruction";
106 	case SIGSEGV:	return "segfault";
107 	case SIGTRAP:	return "unhandled trap";
108 	}
109 
110 	return "unknown signal";
111 }
112 
113 /*
114  * Trap & Exception support
115  */
116 
117 #ifdef CONFIG_PMAC_BACKLIGHT
118 static void pmac_backlight_unblank(void)
119 {
120 	mutex_lock(&pmac_backlight_mutex);
121 	if (pmac_backlight) {
122 		struct backlight_properties *props;
123 
124 		props = &pmac_backlight->props;
125 		props->brightness = props->max_brightness;
126 		props->power = FB_BLANK_UNBLANK;
127 		backlight_update_status(pmac_backlight);
128 	}
129 	mutex_unlock(&pmac_backlight_mutex);
130 }
131 #else
132 static inline void pmac_backlight_unblank(void) { }
133 #endif
134 
135 /*
136  * If oops/die is expected to crash the machine, return true here.
137  *
138  * This should not be expected to be 100% accurate; there may be
139  * notifiers registered or other unexpected conditions that may bring
140  * down the kernel. Or if the current process in the kernel is holding
141  * locks or has other critical state, the kernel may become effectively
142  * unusable anyway.
143  */
144 bool die_will_crash(void)
145 {
146 	if (should_fadump_crash())
147 		return true;
148 	if (kexec_should_crash(current))
149 		return true;
150 	if (in_interrupt() || panic_on_oops ||
151 			!current->pid || is_global_init(current))
152 		return true;
153 
154 	return false;
155 }
156 
157 static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
158 static int die_owner = -1;
159 static unsigned int die_nest_count;
160 static int die_counter;
161 
162 extern void panic_flush_kmsg_start(void)
163 {
164 	/*
165 	 * This is mostly taken from kernel/panic.c, but tries to do
166 	 * relatively minimal work. Don't use delay functions (TB may
167 	 * be broken), don't crash dump (need to set a firmware log),
168 	 * don't run notifiers. We do want to get some information to
169 	 * Linux console.
170 	 */
171 	console_verbose();
172 	bust_spinlocks(1);
173 }
174 
175 extern void panic_flush_kmsg_end(void)
176 {
177 	printk_safe_flush_on_panic();
178 	kmsg_dump(KMSG_DUMP_PANIC);
179 	bust_spinlocks(0);
180 	debug_locks_off();
181 	console_flush_on_panic();
182 }
183 
184 static unsigned long oops_begin(struct pt_regs *regs)
185 {
186 	int cpu;
187 	unsigned long flags;
188 
189 	oops_enter();
190 
191 	/* racy, but better than risking deadlock. */
192 	raw_local_irq_save(flags);
193 	cpu = smp_processor_id();
194 	if (!arch_spin_trylock(&die_lock)) {
195 		if (cpu == die_owner)
196 			/* nested oops. should stop eventually */;
197 		else
198 			arch_spin_lock(&die_lock);
199 	}
200 	die_nest_count++;
201 	die_owner = cpu;
202 	console_verbose();
203 	bust_spinlocks(1);
204 	if (machine_is(powermac))
205 		pmac_backlight_unblank();
206 	return flags;
207 }
208 NOKPROBE_SYMBOL(oops_begin);
209 
210 static void oops_end(unsigned long flags, struct pt_regs *regs,
211 			       int signr)
212 {
213 	bust_spinlocks(0);
214 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
215 	die_nest_count--;
216 	oops_exit();
217 	printk("\n");
218 	if (!die_nest_count) {
219 		/* Nest count reaches zero, release the lock. */
220 		die_owner = -1;
221 		arch_spin_unlock(&die_lock);
222 	}
223 	raw_local_irq_restore(flags);
224 
225 	/*
226 	 * system_reset_exception handles debugger, crash dump, panic, for 0x100
227 	 */
228 	if (TRAP(regs) == 0x100)
229 		return;
230 
231 	crash_fadump(regs, "die oops");
232 
233 	if (kexec_should_crash(current))
234 		crash_kexec(regs);
235 
236 	if (!signr)
237 		return;
238 
239 	/*
240 	 * While our oops output is serialised by a spinlock, output
241 	 * from panic() called below can race and corrupt it. If we
242 	 * know we are going to panic, delay for 1 second so we have a
243 	 * chance to get clean backtraces from all CPUs that are oopsing.
244 	 */
245 	if (in_interrupt() || panic_on_oops || !current->pid ||
246 	    is_global_init(current)) {
247 		mdelay(MSEC_PER_SEC);
248 	}
249 
250 	if (in_interrupt())
251 		panic("Fatal exception in interrupt");
252 	if (panic_on_oops)
253 		panic("Fatal exception");
254 	do_exit(signr);
255 }
256 NOKPROBE_SYMBOL(oops_end);
257 
258 static int __die(const char *str, struct pt_regs *regs, long err)
259 {
260 	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
261 
262 	if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
263 		printk("LE ");
264 	else
265 		printk("BE ");
266 
267 	if (IS_ENABLED(CONFIG_PREEMPT))
268 		pr_cont("PREEMPT ");
269 
270 	if (IS_ENABLED(CONFIG_SMP))
271 		pr_cont("SMP NR_CPUS=%d ", NR_CPUS);
272 
273 	if (debug_pagealloc_enabled())
274 		pr_cont("DEBUG_PAGEALLOC ");
275 
276 	if (IS_ENABLED(CONFIG_NUMA))
277 		pr_cont("NUMA ");
278 
279 	pr_cont("%s\n", ppc_md.name ? ppc_md.name : "");
280 
281 	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
282 		return 1;
283 
284 	print_modules();
285 	show_regs(regs);
286 
287 	return 0;
288 }
289 NOKPROBE_SYMBOL(__die);
290 
291 void die(const char *str, struct pt_regs *regs, long err)
292 {
293 	unsigned long flags;
294 
295 	/*
296 	 * system_reset_exception handles debugger, crash dump, panic, for 0x100
297 	 */
298 	if (TRAP(regs) != 0x100) {
299 		if (debugger(regs))
300 			return;
301 	}
302 
303 	flags = oops_begin(regs);
304 	if (__die(str, regs, err))
305 		err = 0;
306 	oops_end(flags, regs, err);
307 }
308 NOKPROBE_SYMBOL(die);
309 
310 void user_single_step_siginfo(struct task_struct *tsk,
311 				struct pt_regs *regs, siginfo_t *info)
312 {
313 	info->si_signo = SIGTRAP;
314 	info->si_code = TRAP_TRACE;
315 	info->si_addr = (void __user *)regs->nip;
316 }
317 
318 static bool show_unhandled_signals_ratelimited(void)
319 {
320 	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
321 				      DEFAULT_RATELIMIT_BURST);
322 	return show_unhandled_signals && __ratelimit(&rs);
323 }
324 
325 static void show_signal_msg(int signr, struct pt_regs *regs, int code,
326 			    unsigned long addr)
327 {
328 	if (!show_unhandled_signals_ratelimited())
329 		return;
330 
331 	if (!unhandled_signal(current, signr))
332 		return;
333 
334 	pr_info("%s[%d]: %s (%d) at %lx nip %lx lr %lx code %x",
335 		current->comm, current->pid, signame(signr), signr,
336 		addr, regs->nip, regs->link, code);
337 
338 	print_vma_addr(KERN_CONT " in ", regs->nip);
339 
340 	pr_cont("\n");
341 
342 	show_user_instructions(regs);
343 }
344 
345 void _exception_pkey(int signr, struct pt_regs *regs, int code,
346 		     unsigned long addr, int key)
347 {
348 	siginfo_t info;
349 
350 	if (!user_mode(regs)) {
351 		die("Exception in kernel mode", regs, signr);
352 		return;
353 	}
354 
355 	show_signal_msg(signr, regs, code, addr);
356 
357 	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
358 		local_irq_enable();
359 
360 	current->thread.trap_nr = code;
361 
362 	/*
363 	 * Save all the pkey registers AMR/IAMR/UAMOR. Eg: Core dumps need
364 	 * to capture the content, if the task gets killed.
365 	 */
366 	thread_pkey_regs_save(&current->thread);
367 
368 	clear_siginfo(&info);
369 	info.si_signo = signr;
370 	info.si_code = code;
371 	info.si_addr = (void __user *) addr;
372 	info.si_pkey = key;
373 
374 	force_sig_info(signr, &info, current);
375 }
376 
377 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
378 {
379 	_exception_pkey(signr, regs, code, addr, 0);
380 }
381 
382 void system_reset_exception(struct pt_regs *regs)
383 {
384 	/*
385 	 * Avoid crashes in case of nested NMI exceptions. Recoverability
386 	 * is determined by RI and in_nmi
387 	 */
388 	bool nested = in_nmi();
389 	if (!nested)
390 		nmi_enter();
391 
392 	__this_cpu_inc(irq_stat.sreset_irqs);
393 
394 	/* See if any machine dependent calls */
395 	if (ppc_md.system_reset_exception) {
396 		if (ppc_md.system_reset_exception(regs))
397 			goto out;
398 	}
399 
400 	if (debugger(regs))
401 		goto out;
402 
403 	/*
404 	 * A system reset is a request to dump, so we always send
405 	 * it through the crashdump code (if fadump or kdump are
406 	 * registered).
407 	 */
408 	crash_fadump(regs, "System Reset");
409 
410 	crash_kexec(regs);
411 
412 	/*
413 	 * We aren't the primary crash CPU. We need to send it
414 	 * to a holding pattern to avoid it ending up in the panic
415 	 * code.
416 	 */
417 	crash_kexec_secondary(regs);
418 
419 	/*
420 	 * No debugger or crash dump registered, print logs then
421 	 * panic.
422 	 */
423 	die("System Reset", regs, SIGABRT);
424 
425 	mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */
426 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
427 	nmi_panic(regs, "System Reset");
428 
429 out:
430 #ifdef CONFIG_PPC_BOOK3S_64
431 	BUG_ON(get_paca()->in_nmi == 0);
432 	if (get_paca()->in_nmi > 1)
433 		nmi_panic(regs, "Unrecoverable nested System Reset");
434 #endif
435 	/* Must die if the interrupt is not recoverable */
436 	if (!(regs->msr & MSR_RI))
437 		nmi_panic(regs, "Unrecoverable System Reset");
438 
439 	if (!nested)
440 		nmi_exit();
441 
442 	/* What should we do here? We could issue a shutdown or hard reset. */
443 }
444 
445 /*
446  * I/O accesses can cause machine checks on powermacs.
447  * Check if the NIP corresponds to the address of a sync
448  * instruction for which there is an entry in the exception
449  * table.
450  * Note that the 601 only takes a machine check on TEA
451  * (transfer error ack) signal assertion, and does not
452  * set any of the top 16 bits of SRR1.
453  *  -- paulus.
454  */
455 static inline int check_io_access(struct pt_regs *regs)
456 {
457 #ifdef CONFIG_PPC32
458 	unsigned long msr = regs->msr;
459 	const struct exception_table_entry *entry;
460 	unsigned int *nip = (unsigned int *)regs->nip;
461 
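	/*
	 * 0x80000 and 0x40000 are the SRR1 machine-check-signal and
	 * transfer-error-ack cause bits (see machine_check_generic()
	 * below); the "== 0" case covers the 601, which sets none of
	 * the upper SRR1 bits (see the comment above this function).
	 */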
462 	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
463 	    && (entry = search_exception_tables(regs->nip)) != NULL) {
464 		/*
465 		 * Check that it's a sync instruction, or somewhere
466 		 * in the twi; isync; nop sequence that inb/inw/inl uses.
467 		 * As the address is in the exception table
468 		 * we should be able to read the instr there.
469 		 * For the debug message, we look at the preceding
470 		 * load or store.
471 		 */
472 		if (*nip == PPC_INST_NOP)
473 			nip -= 2;
474 		else if (*nip == PPC_INST_ISYNC)
475 			--nip;
476 		if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
477 			unsigned int rb;
478 
479 			--nip;
480 			rb = (*nip >> 11) & 0x1f;
481 			printk(KERN_DEBUG "%s bad port %lx at %p\n",
482 			       (*nip & 0x100)? "OUT to": "IN from",
483 			       regs->gpr[rb] - _IO_BASE, nip);
484 			regs->msr |= MSR_RI;
485 			regs->nip = extable_fixup(entry);
486 			return 1;
487 		}
488 	}
489 #endif /* CONFIG_PPC32 */
490 	return 0;
491 }
492 
493 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
494 /* On 4xx, the reason for the machine check or program exception
495    is in the ESR. */
496 #define get_reason(regs)	((regs)->dsisr)
497 #define REASON_FP		ESR_FP
498 #define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
499 #define REASON_PRIVILEGED	ESR_PPR
500 #define REASON_TRAP		ESR_PTR
501 
502 /* single-step stuff */
503 #define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
504 #define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)
505 #define clear_br_trace(regs)	do {} while(0)
506 #else
507 /* On non-4xx, the reason for the machine check or program
508    exception is in the MSR. */
509 #define get_reason(regs)	((regs)->msr)
510 #define REASON_TM		SRR1_PROGTM
511 #define REASON_FP		SRR1_PROGFPE
512 #define REASON_ILLEGAL		SRR1_PROGILL
513 #define REASON_PRIVILEGED	SRR1_PROGPRIV
514 #define REASON_TRAP		SRR1_PROGTRAP
515 
516 #define single_stepping(regs)	((regs)->msr & MSR_SE)
517 #define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
518 #define clear_br_trace(regs)	((regs)->msr &= ~MSR_BE)
519 #endif
520 
521 #if defined(CONFIG_E500)
522 int machine_check_e500mc(struct pt_regs *regs)
523 {
524 	unsigned long mcsr = mfspr(SPRN_MCSR);
525 	unsigned long pvr = mfspr(SPRN_PVR);
526 	unsigned long reason = mcsr;
527 	int recoverable = 1;
528 
529 	if (reason & MCSR_LD) {
530 		recoverable = fsl_rio_mcheck_exception(regs);
531 		if (recoverable == 1)
532 			goto silent_out;
533 	}
534 
535 	printk("Machine check in kernel mode.\n");
536 	printk("Caused by (from MCSR=%lx): ", reason);
537 
538 	if (reason & MCSR_MCP)
539 		printk("Machine Check Signal\n");
540 
541 	if (reason & MCSR_ICPERR) {
542 		printk("Instruction Cache Parity Error\n");
543 
544 		/*
545 		 * This is recoverable by invalidating the i-cache.
546 		 */
547 		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
548 		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
549 			;
550 
551 		/*
552 		 * This will generally be accompanied by an instruction
553 		 * fetch error report -- only treat MCSR_IF as fatal
554 		 * if it wasn't due to an L1 parity error.
555 		 */
556 		reason &= ~MCSR_IF;
557 	}
558 
559 	if (reason & MCSR_DCPERR_MC) {
560 		printk("Data Cache Parity Error\n");
561 
562 		/*
563 		 * In write shadow mode we auto-recover from the error, but it
564 		 * may still get logged and cause a machine check.  We should
565 		 * only treat the non-write shadow case as non-recoverable.
566 		 */
567 		/* On e6500 core, L1 DCWS (Data cache write shadow mode) bit
568 		 * is not implemented but L1 data cache always runs in write
569 		 * shadow mode. Hence on data cache parity errors HW will
570 		 * automatically invalidate the L1 Data Cache.
571 		 */
572 		if (PVR_VER(pvr) != PVR_VER_E6500) {
573 			if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
574 				recoverable = 0;
575 		}
576 	}
577 
578 	if (reason & MCSR_L2MMU_MHIT) {
579 		printk("Hit on multiple TLB entries\n");
580 		recoverable = 0;
581 	}
582 
583 	if (reason & MCSR_NMI)
584 		printk("Non-maskable interrupt\n");
585 
586 	if (reason & MCSR_IF) {
587 		printk("Instruction Fetch Error Report\n");
588 		recoverable = 0;
589 	}
590 
591 	if (reason & MCSR_LD) {
592 		printk("Load Error Report\n");
593 		recoverable = 0;
594 	}
595 
596 	if (reason & MCSR_ST) {
597 		printk("Store Error Report\n");
598 		recoverable = 0;
599 	}
600 
601 	if (reason & MCSR_LDG) {
602 		printk("Guarded Load Error Report\n");
603 		recoverable = 0;
604 	}
605 
606 	if (reason & MCSR_TLBSYNC)
607 		printk("Simultaneous tlbsync operations\n");
608 
609 	if (reason & MCSR_BSL2_ERR) {
610 		printk("Level 2 Cache Error\n");
611 		recoverable = 0;
612 	}
613 
614 	if (reason & MCSR_MAV) {
615 		u64 addr;
616 
617 		addr = mfspr(SPRN_MCAR);
618 		addr |= (u64)mfspr(SPRN_MCARU) << 32;
619 
620 		printk("Machine Check %s Address: %#llx\n",
621 		       reason & MCSR_MEA ? "Effective" : "Physical", addr);
622 	}
623 
624 silent_out:
625 	mtspr(SPRN_MCSR, mcsr);
626 	return mfspr(SPRN_MCSR) == 0 && recoverable;
627 }
628 
629 int machine_check_e500(struct pt_regs *regs)
630 {
631 	unsigned long reason = mfspr(SPRN_MCSR);
632 
633 	if (reason & MCSR_BUS_RBERR) {
634 		if (fsl_rio_mcheck_exception(regs))
635 			return 1;
636 		if (fsl_pci_mcheck_exception(regs))
637 			return 1;
638 	}
639 
640 	printk("Machine check in kernel mode.\n");
641 	printk("Caused by (from MCSR=%lx): ", reason);
642 
643 	if (reason & MCSR_MCP)
644 		printk("Machine Check Signal\n");
645 	if (reason & MCSR_ICPERR)
646 		printk("Instruction Cache Parity Error\n");
647 	if (reason & MCSR_DCP_PERR)
648 		printk("Data Cache Push Parity Error\n");
649 	if (reason & MCSR_DCPERR)
650 		printk("Data Cache Parity Error\n");
651 	if (reason & MCSR_BUS_IAERR)
652 		printk("Bus - Instruction Address Error\n");
653 	if (reason & MCSR_BUS_RAERR)
654 		printk("Bus - Read Address Error\n");
655 	if (reason & MCSR_BUS_WAERR)
656 		printk("Bus - Write Address Error\n");
657 	if (reason & MCSR_BUS_IBERR)
658 		printk("Bus - Instruction Data Error\n");
659 	if (reason & MCSR_BUS_RBERR)
660 		printk("Bus - Read Data Bus Error\n");
661 	if (reason & MCSR_BUS_WBERR)
662 		printk("Bus - Write Data Bus Error\n");
663 	if (reason & MCSR_BUS_IPERR)
664 		printk("Bus - Instruction Parity Error\n");
665 	if (reason & MCSR_BUS_RPERR)
666 		printk("Bus - Read Parity Error\n");
667 
668 	return 0;
669 }
670 
671 int machine_check_generic(struct pt_regs *regs)
672 {
673 	return 0;
674 }
675 #elif defined(CONFIG_E200)
676 int machine_check_e200(struct pt_regs *regs)
677 {
678 	unsigned long reason = mfspr(SPRN_MCSR);
679 
680 	printk("Machine check in kernel mode.\n");
681 	printk("Caused by (from MCSR=%lx): ", reason);
682 
683 	if (reason & MCSR_MCP)
684 		printk("Machine Check Signal\n");
685 	if (reason & MCSR_CP_PERR)
686 		printk("Cache Push Parity Error\n");
687 	if (reason & MCSR_CPERR)
688 		printk("Cache Parity Error\n");
689 	if (reason & MCSR_EXCP_ERR)
690 		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
691 	if (reason & MCSR_BUS_IRERR)
692 		printk("Bus - Read Bus Error on instruction fetch\n");
693 	if (reason & MCSR_BUS_DRERR)
694 		printk("Bus - Read Bus Error on data load\n");
695 	if (reason & MCSR_BUS_WRERR)
696 		printk("Bus - Write Bus Error on buffered store or cache line push\n");
697 
698 	return 0;
699 }
700 #elif defined(CONFIG_PPC32)
701 int machine_check_generic(struct pt_regs *regs)
702 {
703 	unsigned long reason = regs->msr;
704 
705 	printk("Machine check in kernel mode.\n");
706 	printk("Caused by (from SRR1=%lx): ", reason);
707 	switch (reason & 0x601F0000) {
708 	case 0x80000:
709 		printk("Machine check signal\n");
710 		break;
711 	case 0:		/* for 601 */
712 	case 0x40000:
713 	case 0x140000:	/* 7450 MSS error and TEA */
714 		printk("Transfer error ack signal\n");
715 		break;
716 	case 0x20000:
717 		printk("Data parity error signal\n");
718 		break;
719 	case 0x10000:
720 		printk("Address parity error signal\n");
721 		break;
722 	case 0x20000000:
723 		printk("L1 Data Cache error\n");
724 		break;
725 	case 0x40000000:
726 		printk("L1 Instruction Cache error\n");
727 		break;
728 	case 0x00100000:
729 		printk("L2 data cache parity error\n");
730 		break;
731 	default:
732 		printk("Unknown values in msr\n");
733 	}
734 	return 0;
735 }
736 #endif /* everything else */
737 
738 void machine_check_exception(struct pt_regs *regs)
739 {
740 	int recover = 0;
741 	bool nested = in_nmi();
742 	if (!nested)
743 		nmi_enter();
744 
745 	/* 64s accounts the mce in machine_check_early when in HVMODE */
746 	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !cpu_has_feature(CPU_FTR_HVMODE))
747 		__this_cpu_inc(irq_stat.mce_exceptions);
748 
749 	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
750 
751 	/* See if any machine dependent calls. In theory, we would want
752 	 * to call the CPU first, and call the ppc_md. one if the CPU
753 	 * one returns a positive number. However there is existing code
754 	 * that assumes the board gets a first chance, so let's keep it
755 	 * that way for now and fix things later. --BenH.
756 	 */
757 	if (ppc_md.machine_check_exception)
758 		recover = ppc_md.machine_check_exception(regs);
759 	else if (cur_cpu_spec->machine_check)
760 		recover = cur_cpu_spec->machine_check(regs);
761 
762 	if (recover > 0)
763 		goto bail;
764 
765 	if (debugger_fault_handler(regs))
766 		goto bail;
767 
768 	if (check_io_access(regs))
769 		goto bail;
770 
771 	die("Machine check", regs, SIGBUS);
772 
773 	/* Must die if the interrupt is not recoverable */
774 	if (!(regs->msr & MSR_RI))
775 		nmi_panic(regs, "Unrecoverable Machine check");
776 
777 bail:
778 	if (!nested)
779 		nmi_exit();
780 }
781 
782 void SMIException(struct pt_regs *regs)
783 {
784 	die("System Management Interrupt", regs, SIGABRT);
785 }
786 
787 #ifdef CONFIG_VSX
788 static void p9_hmi_special_emu(struct pt_regs *regs)
789 {
790 	unsigned int ra, rb, t, i, sel, instr, rc;
791 	const void __user *addr;
792 	u8 vbuf[16], *vdst;
793 	unsigned long ea, msr, msr_mask;
794 	bool swap;
795 
796 	if (__get_user_inatomic(instr, (unsigned int __user *)regs->nip))
797 		return;
798 
799 	/*
800 	 * lxvb16x	opcode: 0x7c0006d8
801 	 * lxvd2x	opcode: 0x7c000698
802 	 * lxvh8x	opcode: 0x7c000658
803 	 * lxvw4x	opcode: 0x7c000618
804 	 */
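	/*
	 * The mask below clears the 2-bit "sel" field (bits 6-7) and the
	 * TX bit (bit 0), so this single compare matches all four opcodes
	 * listed above, for either half of the VSR file.
	 */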
805 	if ((instr & 0xfc00073e) != 0x7c000618) {
806 		pr_devel("HMI vec emu: not vector CI %i:%s[%d] nip=%016lx"
807 			 " instr=%08x\n",
808 			 smp_processor_id(), current->comm, current->pid,
809 			 regs->nip, instr);
810 		return;
811 	}
812 
813 	/* Grab vector registers into the task struct */
814 	msr = regs->msr; /* Grab msr before we flush the bits */
815 	flush_vsx_to_thread(current);
816 	enable_kernel_altivec();
817 
818 	/*
819 	 * Is userspace running with a different endian (this is rare but
820 	 * not impossible)
821 	 */
822 	swap = (msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
823 
824 	/* Decode the instruction */
825 	ra = (instr >> 16) & 0x1f;
826 	rb = (instr >> 11) & 0x1f;
827 	t = (instr >> 21) & 0x1f;
828 	if (instr & 1)
829 		vdst = (u8 *)&current->thread.vr_state.vr[t];
830 	else
831 		vdst = (u8 *)&current->thread.fp_state.fpr[t][0];
832 
833 	/* Grab the vector address */
834 	ea = regs->gpr[rb] + (ra ? regs->gpr[ra] : 0);
835 	if (is_32bit_task())
836 		ea &= 0xfffffffful;
837 	addr = (__force const void __user *)ea;
838 
839 	/* Check it */
840 	if (!access_ok(VERIFY_READ, addr, 16)) {
841 		pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx"
842 			 " instr=%08x addr=%016lx\n",
843 			 smp_processor_id(), current->comm, current->pid,
844 			 regs->nip, instr, (unsigned long)addr);
845 		return;
846 	}
847 
848 	/* Read the vector */
849 	rc = 0;
850 	if ((unsigned long)addr & 0xfUL)
851 		/* unaligned case */
852 		rc = __copy_from_user_inatomic(vbuf, addr, 16);
853 	else
854 		__get_user_atomic_128_aligned(vbuf, addr, rc);
855 	if (rc) {
856 		pr_devel("HMI vec emu: page fault %i:%s[%d] nip=%016lx"
857 			 " instr=%08x addr=%016lx\n",
858 			 smp_processor_id(), current->comm, current->pid,
859 			 regs->nip, instr, (unsigned long)addr);
860 		return;
861 	}
862 
863 	pr_devel("HMI vec emu: emulated vector CI %i:%s[%d] nip=%016lx"
864 		 " instr=%08x addr=%016lx\n",
865 		 smp_processor_id(), current->comm, current->pid, regs->nip,
866 		 instr, (unsigned long) addr);
867 
868 	/* Grab instruction "selector" */
869 	sel = (instr >> 6) & 3;
870 
871 	/*
872 	 * Check to make sure the facility is actually enabled. This
873 	 * could happen if we get a false positive hit.
874 	 *
875 	 * lxvd2x/lxvw4x always check MSR VSX sel = 0,2
876 	 * lxvh8x/lxvb16x check MSR VSX or VEC depending on VSR used sel = 1,3
877 	 */
878 	msr_mask = MSR_VSX;
879 	if ((sel & 1) && (instr & 1)) /* lxvh8x & lxvb16x + VSR >= 32 */
880 		msr_mask = MSR_VEC;
881 	if (!(msr & msr_mask)) {
882 		pr_devel("HMI vec emu: MSR fac clear %i:%s[%d] nip=%016lx"
883 			 " instr=%08x msr:%016lx\n",
884 			 smp_processor_id(), current->comm, current->pid,
885 			 regs->nip, instr, msr);
886 		return;
887 	}
888 
889 	/* Do logging here before we modify sel based on endian */
890 	switch (sel) {
891 	case 0:	/* lxvw4x */
892 		PPC_WARN_EMULATED(lxvw4x, regs);
893 		break;
894 	case 1: /* lxvh8x */
895 		PPC_WARN_EMULATED(lxvh8x, regs);
896 		break;
897 	case 2: /* lxvd2x */
898 		PPC_WARN_EMULATED(lxvd2x, regs);
899 		break;
900 	case 3: /* lxvb16x */
901 		PPC_WARN_EMULATED(lxvb16x, regs);
902 		break;
903 	}
904 
905 #ifdef __LITTLE_ENDIAN__
906 	/*
907 	 * An LE kernel stores the vector in the task struct as an LE
908 	 * byte array (effectively swapping both the components and
909 	 * the content of the components). Those instructions expect
910 	 * the components to remain in ascending address order, so we
911 	 * swap them back.
912 	 *
913 	 * If we are running a BE user space, the expectation is that
914 	 * of a simple memcpy, so forcing the emulation to look like
915 	 * a lxvb16x should do the trick.
916 	 */
917 	if (swap)
918 		sel = 3;
919 
920 	switch (sel) {
921 	case 0:	/* lxvw4x */
922 		for (i = 0; i < 4; i++)
923 			((u32 *)vdst)[i] = ((u32 *)vbuf)[3-i];
924 		break;
925 	case 1: /* lxvh8x */
926 		for (i = 0; i < 8; i++)
927 			((u16 *)vdst)[i] = ((u16 *)vbuf)[7-i];
928 		break;
929 	case 2: /* lxvd2x */
930 		for (i = 0; i < 2; i++)
931 			((u64 *)vdst)[i] = ((u64 *)vbuf)[1-i];
932 		break;
933 	case 3: /* lxvb16x */
934 		for (i = 0; i < 16; i++)
935 			vdst[i] = vbuf[15-i];
936 		break;
937 	}
938 #else /* __LITTLE_ENDIAN__ */
939 	/* On a big endian kernel, a BE userspace only needs a memcpy */
940 	if (!swap)
941 		sel = 3;
942 
943 	/* Otherwise, we need to swap the content of the components */
944 	switch (sel) {
945 	case 0:	/* lxvw4x */
946 		for (i = 0; i < 4; i++)
947 			((u32 *)vdst)[i] = cpu_to_le32(((u32 *)vbuf)[i]);
948 		break;
949 	case 1: /* lxvh8x */
950 		for (i = 0; i < 8; i++)
951 			((u16 *)vdst)[i] = cpu_to_le16(((u16 *)vbuf)[i]);
952 		break;
953 	case 2: /* lxvd2x */
954 		for (i = 0; i < 2; i++)
955 			((u64 *)vdst)[i] = cpu_to_le64(((u64 *)vbuf)[i]);
956 		break;
957 	case 3: /* lxvb16x */
958 		memcpy(vdst, vbuf, 16);
959 		break;
960 	}
961 #endif /* !__LITTLE_ENDIAN__ */
962 
963 	/* Go to next instruction */
964 	regs->nip += 4;
965 }
966 #endif /* CONFIG_VSX */
967 
968 void handle_hmi_exception(struct pt_regs *regs)
969 {
970 	struct pt_regs *old_regs;
971 
972 	old_regs = set_irq_regs(regs);
973 	irq_enter();
974 
975 #ifdef CONFIG_VSX
976 	/* Real mode flagged P9 special emu is needed */
977 	if (local_paca->hmi_p9_special_emu) {
978 		local_paca->hmi_p9_special_emu = 0;
979 
980 		/*
981 		 * We don't want to take page faults while doing the
982 		 * emulation, we just replay the instruction if necessary.
983 		 */
984 		pagefault_disable();
985 		p9_hmi_special_emu(regs);
986 		pagefault_enable();
987 	}
988 #endif /* CONFIG_VSX */
989 
990 	if (ppc_md.handle_hmi_exception)
991 		ppc_md.handle_hmi_exception(regs);
992 
993 	irq_exit();
994 	set_irq_regs(old_regs);
995 }
996 
997 void unknown_exception(struct pt_regs *regs)
998 {
999 	enum ctx_state prev_state = exception_enter();
1000 
1001 	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
1002 	       regs->nip, regs->msr, regs->trap);
1003 
1004 	_exception(SIGTRAP, regs, TRAP_UNK, 0);
1005 
1006 	exception_exit(prev_state);
1007 }
1008 
1009 void instruction_breakpoint_exception(struct pt_regs *regs)
1010 {
1011 	enum ctx_state prev_state = exception_enter();
1012 
1013 	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
1014 					5, SIGTRAP) == NOTIFY_STOP)
1015 		goto bail;
1016 	if (debugger_iabr_match(regs))
1017 		goto bail;
1018 	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
1019 
1020 bail:
1021 	exception_exit(prev_state);
1022 }
1023 
1024 void RunModeException(struct pt_regs *regs)
1025 {
1026 	_exception(SIGTRAP, regs, TRAP_UNK, 0);
1027 }
1028 
1029 void single_step_exception(struct pt_regs *regs)
1030 {
1031 	enum ctx_state prev_state = exception_enter();
1032 
1033 	clear_single_step(regs);
1034 	clear_br_trace(regs);
1035 
1036 	if (kprobe_post_handler(regs))
1037 		return;
1038 
1039 	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
1040 					5, SIGTRAP) == NOTIFY_STOP)
1041 		goto bail;
1042 	if (debugger_sstep(regs))
1043 		goto bail;
1044 
1045 	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
1046 
1047 bail:
1048 	exception_exit(prev_state);
1049 }
1050 NOKPROBE_SYMBOL(single_step_exception);
1051 
1052 /*
1053  * After we have successfully emulated an instruction, we have to
1054  * check if the instruction was being single-stepped, and if so,
1055  * pretend we got a single-step exception.  This was pointed out
1056  * by Kumar Gala.  -- paulus
1057  */
1058 static void emulate_single_step(struct pt_regs *regs)
1059 {
1060 	if (single_stepping(regs))
1061 		single_step_exception(regs);
1062 }
1063 
1064 static inline int __parse_fpscr(unsigned long fpscr)
1065 {
1066 	int ret = FPE_FLTUNK;
1067 
1068 	/* Invalid operation */
1069 	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
1070 		ret = FPE_FLTINV;
1071 
1072 	/* Overflow */
1073 	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
1074 		ret = FPE_FLTOVF;
1075 
1076 	/* Underflow */
1077 	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
1078 		ret = FPE_FLTUND;
1079 
1080 	/* Divide by zero */
1081 	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
1082 		ret = FPE_FLTDIV;
1083 
1084 	/* Inexact result */
1085 	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
1086 		ret = FPE_FLTRES;
1087 
1088 	return ret;
1089 }
1090 
1091 static void parse_fpe(struct pt_regs *regs)
1092 {
1093 	int code = 0;
1094 
1095 	flush_fp_to_thread(current);
1096 
1097 	code = __parse_fpscr(current->thread.fp_state.fpscr);
1098 
1099 	_exception(SIGFPE, regs, code, regs->nip);
1100 }
1101 
1102 /*
1103  * Illegal instruction emulation support.  Originally written to
1104  * provide the PVR to user applications using the mfspr rd, PVR.
1105  * Return non-zero if we can't emulate, or -EFAULT if the associated
1106  * memory access caused an access fault.  Return zero on success.
1107  *
1108  * There are a couple of ways to do this, either "decode" the instruction
1109  * or directly match lots of bits.  In this case, matching lots of
1110  * bits is faster and easier.
1111  *
1112  */
1113 static int emulate_string_inst(struct pt_regs *regs, u32 instword)
1114 {
1115 	u8 rT = (instword >> 21) & 0x1f;
1116 	u8 rA = (instword >> 16) & 0x1f;
1117 	u8 NB_RB = (instword >> 11) & 0x1f;
1118 	u32 num_bytes;
1119 	unsigned long EA;
1120 	int pos = 0;
1121 
1122 	/* Early out if we are an invalid form of lswx */
1123 	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
1124 		if ((rT == rA) || (rT == NB_RB))
1125 			return -EINVAL;
1126 
1127 	EA = (rA == 0) ? 0 : regs->gpr[rA];
1128 
1129 	switch (instword & PPC_INST_STRING_MASK) {
1130 		case PPC_INST_LSWX:
1131 		case PPC_INST_STSWX:
1132 			EA += NB_RB;
1133 			num_bytes = regs->xer & 0x7f;
1134 			break;
1135 		case PPC_INST_LSWI:
1136 		case PPC_INST_STSWI:
1137 			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
1138 			break;
1139 		default:
1140 			return -EINVAL;
1141 	}
1142 
1143 	while (num_bytes != 0)
1144 	{
1145 		u8 val;
1146 		u32 shift = 8 * (3 - (pos & 0x3));
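		/* Bytes fill each GPR from the most-significant byte of the
		 * low 32 bits downward: pos 0 lands in bits 31-24, pos 3 in
		 * bits 7-0, matching how lswi/stswi pack bytes into registers. */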
1147 
1148 		/* if process is 32-bit, clear upper 32 bits of EA */
1149 		if ((regs->msr & MSR_64BIT) == 0)
1150 			EA &= 0xFFFFFFFF;
1151 
1152 		switch ((instword & PPC_INST_STRING_MASK)) {
1153 			case PPC_INST_LSWX:
1154 			case PPC_INST_LSWI:
1155 				if (get_user(val, (u8 __user *)EA))
1156 					return -EFAULT;
1157 				/* first time updating this reg,
1158 				 * zero it out */
1159 				if (pos == 0)
1160 					regs->gpr[rT] = 0;
1161 				regs->gpr[rT] |= val << shift;
1162 				break;
1163 			case PPC_INST_STSWI:
1164 			case PPC_INST_STSWX:
1165 				val = regs->gpr[rT] >> shift;
1166 				if (put_user(val, (u8 __user *)EA))
1167 					return -EFAULT;
1168 				break;
1169 		}
1170 		/* move EA to next address */
1171 		EA += 1;
1172 		num_bytes--;
1173 
1174 		/* manage our position within the register */
1175 		if (++pos == 4) {
1176 			pos = 0;
1177 			if (++rT == 32)
1178 				rT = 0;
1179 		}
1180 	}
1181 
1182 	return 0;
1183 }
1184 
1185 static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
1186 {
1187 	u32 ra,rs;
1188 	unsigned long tmp;
1189 
1190 	ra = (instword >> 16) & 0x1f;
1191 	rs = (instword >> 21) & 0x1f;
1192 
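	/* Standard SWAR byte-wise population count: sum adjacent 1-, 2- and
	 * 4-bit groups in parallel, so each byte of tmp ends up holding the
	 * popcount of the corresponding byte of rs, as popcntb defines. */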
1193 	tmp = regs->gpr[rs];
1194 	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
1195 	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
1196 	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
1197 	regs->gpr[ra] = tmp;
1198 
1199 	return 0;
1200 }
1201 
1202 static int emulate_isel(struct pt_regs *regs, u32 instword)
1203 {
1204 	u8 rT = (instword >> 21) & 0x1f;
1205 	u8 rA = (instword >> 16) & 0x1f;
1206 	u8 rB = (instword >> 11) & 0x1f;
1207 	u8 BC = (instword >> 6) & 0x1f;
1208 	u8 bit;
1209 	unsigned long tmp;
1210 
1211 	tmp = (rA == 0) ? 0 : regs->gpr[rA];
1212 	bit = (regs->ccr >> (31 - BC)) & 0x1;
1213 
1214 	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];
1215 
1216 	return 0;
1217 }
1218 
1219 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1220 static inline bool tm_abort_check(struct pt_regs *regs, int cause)
1221 {
1222         /* If we're emulating a load/store in an active transaction, we cannot
1223          * emulate it as the kernel operates in transaction suspended context.
1224          * We need to abort the transaction.  This creates a persistent TM
1225          * abort so tell the user what caused it with a new code.
1226 	 */
1227 	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
1228 		tm_enable();
1229 		tm_abort(cause);
1230 		return true;
1231 	}
1232 	return false;
1233 }
1234 #else
1235 static inline bool tm_abort_check(struct pt_regs *regs, int reason)
1236 {
1237 	return false;
1238 }
1239 #endif
1240 
1241 static int emulate_instruction(struct pt_regs *regs)
1242 {
1243 	u32 instword;
1244 	u32 rd;
1245 
1246 	if (!user_mode(regs))
1247 		return -EINVAL;
1248 	CHECK_FULL_REGS(regs);
1249 
1250 	if (get_user(instword, (u32 __user *)(regs->nip)))
1251 		return -EFAULT;
1252 
1253 	/* Emulate the mfspr rD, PVR. */
1254 	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
1255 		PPC_WARN_EMULATED(mfpvr, regs);
1256 		rd = (instword >> 21) & 0x1f;
1257 		regs->gpr[rd] = mfspr(SPRN_PVR);
1258 		return 0;
1259 	}
1260 
1261 	/* Emulating the dcba insn is just a no-op.  */
1262 	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
1263 		PPC_WARN_EMULATED(dcba, regs);
1264 		return 0;
1265 	}
1266 
1267 	/* Emulate the mcrxr insn.  */
1268 	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
1269 		int shift = (instword >> 21) & 0x1c;
1270 		unsigned long msk = 0xf0000000UL >> shift;
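		/* shift is 4 * BF (the CR field named by the instruction), so
		 * msk selects that 4-bit CR field; the top four XER bits are
		 * copied there and then cleared from XER, as mcrxr requires. */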
1271 
1272 		PPC_WARN_EMULATED(mcrxr, regs);
1273 		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
1274 		regs->xer &= ~0xf0000000UL;
1275 		return 0;
1276 	}
1277 
1278 	/* Emulate load/store string insn. */
1279 	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
1280 		if (tm_abort_check(regs,
1281 				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
1282 			return -EINVAL;
1283 		PPC_WARN_EMULATED(string, regs);
1284 		return emulate_string_inst(regs, instword);
1285 	}
1286 
1287 	/* Emulate the popcntb (Population Count Bytes) instruction. */
1288 	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
1289 		PPC_WARN_EMULATED(popcntb, regs);
1290 		return emulate_popcntb_inst(regs, instword);
1291 	}
1292 
1293 	/* Emulate isel (Integer Select) instruction */
1294 	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
1295 		PPC_WARN_EMULATED(isel, regs);
1296 		return emulate_isel(regs, instword);
1297 	}
1298 
1299 	/* Emulate sync instruction variants */
1300 	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
1301 		PPC_WARN_EMULATED(sync, regs);
1302 		asm volatile("sync");
1303 		return 0;
1304 	}
1305 
1306 #ifdef CONFIG_PPC64
1307 	/* Emulate the mfspr rD, DSCR. */
1308 	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
1309 		PPC_INST_MFSPR_DSCR_USER) ||
1310 	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
1311 		PPC_INST_MFSPR_DSCR)) &&
1312 			cpu_has_feature(CPU_FTR_DSCR)) {
1313 		PPC_WARN_EMULATED(mfdscr, regs);
1314 		rd = (instword >> 21) & 0x1f;
1315 		regs->gpr[rd] = mfspr(SPRN_DSCR);
1316 		return 0;
1317 	}
1318 	/* Emulate the mtspr DSCR, rD. */
1319 	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
1320 		PPC_INST_MTSPR_DSCR_USER) ||
1321 	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
1322 		PPC_INST_MTSPR_DSCR)) &&
1323 			cpu_has_feature(CPU_FTR_DSCR)) {
1324 		PPC_WARN_EMULATED(mtdscr, regs);
1325 		rd = (instword >> 21) & 0x1f;
1326 		current->thread.dscr = regs->gpr[rd];
1327 		current->thread.dscr_inherit = 1;
1328 		mtspr(SPRN_DSCR, current->thread.dscr);
1329 		return 0;
1330 	}
1331 #endif
1332 
1333 	return -EINVAL;
1334 }
1335 
1336 int is_valid_bugaddr(unsigned long addr)
1337 {
1338 	return is_kernel_addr(addr);
1339 }
1340 
1341 #ifdef CONFIG_MATH_EMULATION
1342 static int emulate_math(struct pt_regs *regs)
1343 {
1344 	int ret;
1345 	extern int do_mathemu(struct pt_regs *regs);
1346 
1347 	ret = do_mathemu(regs);
1348 	if (ret >= 0)
1349 		PPC_WARN_EMULATED(math, regs);
1350 
1351 	switch (ret) {
1352 	case 0:
1353 		emulate_single_step(regs);
1354 		return 0;
1355 	case 1: {
1356 			int code = 0;
1357 			code = __parse_fpscr(current->thread.fp_state.fpscr);
1358 			_exception(SIGFPE, regs, code, regs->nip);
1359 			return 0;
1360 		}
1361 	case -EFAULT:
1362 		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1363 		return 0;
1364 	}
1365 
1366 	return -1;
1367 }
1368 #else
1369 static inline int emulate_math(struct pt_regs *regs) { return -1; }
1370 #endif
1371 
1372 void program_check_exception(struct pt_regs *regs)
1373 {
1374 	enum ctx_state prev_state = exception_enter();
1375 	unsigned int reason = get_reason(regs);
1376 
1377 	/* We can now get here via an FP Unavailable exception if the core
1378 	 * has no FPU; in that case the reason flags will be 0. */
1379 
1380 	if (reason & REASON_FP) {
1381 		/* IEEE FP exception */
1382 		parse_fpe(regs);
1383 		goto bail;
1384 	}
1385 	if (reason & REASON_TRAP) {
1386 		unsigned long bugaddr;
1387 		/* Debugger is first in line to stop recursive faults in
1388 		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
1389 		if (debugger_bpt(regs))
1390 			goto bail;
1391 
1392 		if (kprobe_handler(regs))
1393 			goto bail;
1394 
1395 		/* trap exception */
1396 		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
1397 				== NOTIFY_STOP)
1398 			goto bail;
1399 
1400 		bugaddr = regs->nip;
1401 		/*
1402 		 * Fixup bugaddr for BUG_ON() in real mode
1403 		 */
1404 		if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
1405 			bugaddr += PAGE_OFFSET;
1406 
1407 		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
1408 		    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
1409 			regs->nip += 4;
1410 			goto bail;
1411 		}
1412 		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
1413 		goto bail;
1414 	}
1415 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1416 	if (reason & REASON_TM) {
1417 		/* This is a TM "Bad Thing Exception" program check.
1418 		 * This occurs when:
1419 		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
1420 		 *    transition in TM states.
1421 		 * -  A trechkpt is attempted when transactional.
1422 		 * -  A treclaim is attempted when non transactional.
1423 		 * -  A tend is illegally attempted.
1424 		 * -  writing a TM SPR when transactional.
1425 		 *
1426 		 * If usermode caused this, it's done something illegal and
1427 		 * gets a SIGILL slap on the wrist.  We call it an illegal
1428 		 * operand to distinguish from the instruction just being bad
1429 		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
1430 		 * illegal /placement/ of a valid instruction.
1431 		 */
1432 		if (user_mode(regs)) {
1433 			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
1434 			goto bail;
1435 		} else {
1436 			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
1437 			       "at %lx (msr 0x%x)\n", regs->nip, reason);
1438 			die("Unrecoverable exception", regs, SIGABRT);
1439 		}
1440 	}
1441 #endif
1442 
1443 	/*
1444 	 * If we took the program check in the kernel skip down to sending a
1445 	 * SIGILL. The subsequent cases all relate to emulating instructions
1446 	 * which we should only do for userspace. We also do not want to enable
1447 	 * interrupts for kernel faults because that might lead to further
1448 	 * faults, and lose the context of the original exception.
1449 	 */
1450 	if (!user_mode(regs))
1451 		goto sigill;
1452 
1453 	/* We restore the interrupt state now */
1454 	if (!arch_irq_disabled_regs(regs))
1455 		local_irq_enable();
1456 
1457 	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
1458 	 * but there seems to be a hardware bug on the 405GP (RevD)
1459 	 * that means ESR is sometimes set incorrectly - either to
1460 	 * ESR_DST (!?) or 0.  In the process of chasing this with the
1461 	 * hardware people - not sure if it can happen on any illegal
1462 	 * instruction or only on FP instructions, whether there is a
1463 	 * pattern to occurrences etc. -dgibson 31/Mar/2003
1464 	 */
1465 	if (!emulate_math(regs))
1466 		goto bail;
1467 
1468 	/* Try to emulate it if we should. */
1469 	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
1470 		switch (emulate_instruction(regs)) {
1471 		case 0:
1472 			regs->nip += 4;
1473 			emulate_single_step(regs);
1474 			goto bail;
1475 		case -EFAULT:
1476 			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1477 			goto bail;
1478 		}
1479 	}
1480 
1481 sigill:
1482 	if (reason & REASON_PRIVILEGED)
1483 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1484 	else
1485 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1486 
1487 bail:
1488 	exception_exit(prev_state);
1489 }
1490 NOKPROBE_SYMBOL(program_check_exception);
1491 
1492 /*
1493  * This occurs when running in hypervisor mode on POWER6 or later
1494  * and an illegal instruction is encountered.
1495  */
1496 void emulation_assist_interrupt(struct pt_regs *regs)
1497 {
1498 	regs->msr |= REASON_ILLEGAL;
1499 	program_check_exception(regs);
1500 }
1501 NOKPROBE_SYMBOL(emulation_assist_interrupt);
1502 
1503 void alignment_exception(struct pt_regs *regs)
1504 {
1505 	enum ctx_state prev_state = exception_enter();
1506 	int sig, code, fixed = 0;
1507 
1508 	/* We restore the interrupt state now */
1509 	if (!arch_irq_disabled_regs(regs))
1510 		local_irq_enable();
1511 
1512 	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
1513 		goto bail;
1514 
1515 	/* we don't implement logging of alignment exceptions */
1516 	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
1517 		fixed = fix_alignment(regs);
1518 
1519 	if (fixed == 1) {
1520 		regs->nip += 4;	/* skip over emulated instruction */
1521 		emulate_single_step(regs);
1522 		goto bail;
1523 	}
1524 
1525 	/* Operand address was bad */
1526 	if (fixed == -EFAULT) {
1527 		sig = SIGSEGV;
1528 		code = SEGV_ACCERR;
1529 	} else {
1530 		sig = SIGBUS;
1531 		code = BUS_ADRALN;
1532 	}
1533 	if (user_mode(regs))
1534 		_exception(sig, regs, code, regs->dar);
1535 	else
1536 		bad_page_fault(regs, regs->dar, sig);
1537 
1538 bail:
1539 	exception_exit(prev_state);
1540 }
1541 
1542 void StackOverflow(struct pt_regs *regs)
1543 {
1544 	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
1545 	       current, regs->gpr[1]);
1546 	debugger(regs);
1547 	show_regs(regs);
1548 	panic("kernel stack overflow");
1549 }
1550 
1551 void nonrecoverable_exception(struct pt_regs *regs)
1552 {
1553 	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
1554 	       regs->nip, regs->msr);
1555 	debugger(regs);
1556 	die("nonrecoverable exception", regs, SIGKILL);
1557 }
1558 
1559 void kernel_fp_unavailable_exception(struct pt_regs *regs)
1560 {
1561 	enum ctx_state prev_state = exception_enter();
1562 
1563 	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
1564 			  "%lx at %lx\n", regs->trap, regs->nip);
1565 	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
1566 
1567 	exception_exit(prev_state);
1568 }
1569 
1570 void altivec_unavailable_exception(struct pt_regs *regs)
1571 {
1572 	enum ctx_state prev_state = exception_enter();
1573 
1574 	if (user_mode(regs)) {
1575 		/* A user program has executed an altivec instruction,
1576 		   but this kernel doesn't support altivec. */
1577 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1578 		goto bail;
1579 	}
1580 
1581 	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
1582 			"%lx at %lx\n", regs->trap, regs->nip);
1583 	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
1584 
1585 bail:
1586 	exception_exit(prev_state);
1587 }
1588 
1589 void vsx_unavailable_exception(struct pt_regs *regs)
1590 {
1591 	if (user_mode(regs)) {
1592 		/* A user program has executed a vsx instruction,
1593 		   but this kernel doesn't support vsx. */
1594 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1595 		return;
1596 	}
1597 
1598 	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
1599 			"%lx at %lx\n", regs->trap, regs->nip);
1600 	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
1601 }
1602 
1603 #ifdef CONFIG_PPC64
1604 static void tm_unavailable(struct pt_regs *regs)
1605 {
1606 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1607 	if (user_mode(regs)) {
1608 		current->thread.load_tm++;
1609 		regs->msr |= MSR_TM;
1610 		tm_enable();
1611 		tm_restore_sprs(&current->thread);
1612 		return;
1613 	}
1614 #endif
1615 	pr_emerg("Unrecoverable TM Unavailable Exception "
1616 			"%lx at %lx\n", regs->trap, regs->nip);
1617 	die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
1618 }
1619 
1620 void facility_unavailable_exception(struct pt_regs *regs)
1621 {
1622 	static char *facility_strings[] = {
1623 		[FSCR_FP_LG] = "FPU",
1624 		[FSCR_VECVSX_LG] = "VMX/VSX",
1625 		[FSCR_DSCR_LG] = "DSCR",
1626 		[FSCR_PM_LG] = "PMU SPRs",
1627 		[FSCR_BHRB_LG] = "BHRB",
1628 		[FSCR_TM_LG] = "TM",
1629 		[FSCR_EBB_LG] = "EBB",
1630 		[FSCR_TAR_LG] = "TAR",
1631 		[FSCR_MSGP_LG] = "MSGP",
1632 		[FSCR_SCV_LG] = "SCV",
1633 	};
1634 	char *facility = "unknown";
1635 	u64 value;
1636 	u32 instword, rd;
1637 	u8 status;
1638 	bool hv;
1639 
1640 	hv = (TRAP(regs) == 0xf80);
1641 	if (hv)
1642 		value = mfspr(SPRN_HFSCR);
1643 	else
1644 		value = mfspr(SPRN_FSCR);
1645 
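	/* The top byte of the (H)FSCR identifies which facility caused the
	 * interrupt; it is used below to index facility_strings[]. */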
1646 	status = value >> 56;
1647 	if ((hv || status >= 2) &&
1648 	    (status < ARRAY_SIZE(facility_strings)) &&
1649 	    facility_strings[status])
1650 		facility = facility_strings[status];
1651 
1652 	/* We should not have taken this interrupt in kernel */
1653 	if (!user_mode(regs)) {
1654 		pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n",
1655 			 facility, status, regs->nip);
1656 		die("Unexpected facility unavailable exception", regs, SIGABRT);
1657 	}
1658 
1659 	/* We restore the interrupt state now */
1660 	if (!arch_irq_disabled_regs(regs))
1661 		local_irq_enable();
1662 
1663 	if (status == FSCR_DSCR_LG) {
1664 		/*
1665 		 * User is accessing the DSCR register using the problem
1666 		 * state only SPR number (0x03) either through a mfspr or
1667 		 * a mtspr instruction. If it is a write attempt through
1668 		 * a mtspr, then we set the inherit bit. This also allows
1669 		 * the user to write or read the register directly in the
1670 		 * future by setting via the FSCR DSCR bit. But in case it
1671 		 * is a read DSCR attempt through a mfspr instruction, we
1672 		 * just emulate the instruction instead. This code path will
1673 		 * always emulate all the mfspr instructions till the user
1674 		 * has attempted at least one mtspr instruction. This way it
1675 		 * preserves the same behaviour when the user is accessing
1676 		 * the DSCR through privilege level only SPR number (0x11)
1677 		 * which is emulated through illegal instruction exception.
1678 		 * We always leave HFSCR DSCR set.
1679 		 */
1680 		if (get_user(instword, (u32 __user *)(regs->nip))) {
1681 			pr_err("Failed to fetch the user instruction\n");
1682 			return;
1683 		}
1684 
1685 		/* Write into DSCR (mtspr 0x03, RS) */
1686 		if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
1687 				== PPC_INST_MTSPR_DSCR_USER) {
1688 			rd = (instword >> 21) & 0x1f;
1689 			current->thread.dscr = regs->gpr[rd];
1690 			current->thread.dscr_inherit = 1;
1691 			current->thread.fscr |= FSCR_DSCR;
1692 			mtspr(SPRN_FSCR, current->thread.fscr);
1693 		}
1694 
1695 		/* Read from DSCR (mfspr RT, 0x03) */
1696 		if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
1697 				== PPC_INST_MFSPR_DSCR_USER) {
1698 			if (emulate_instruction(regs)) {
1699 				pr_err("DSCR based mfspr emulation failed\n");
1700 				return;
1701 			}
1702 			regs->nip += 4;
1703 			emulate_single_step(regs);
1704 		}
1705 		return;
1706 	}
1707 
1708 	if (status == FSCR_TM_LG) {
1709 		/*
1710 		 * If we're here then the hardware is TM aware because it
1711 		 * generated an exception with FSCR_TM set.
1712 		 *
1713 		 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
1714 		 * told us not to do TM, or the kernel is not built with TM
1715 		 * support.
1716 		 *
1717 		 * If both of those things are true, then userspace can spam the
1718 		 * console by triggering the printk() below just by continually
1719 		 * doing tbegin (or any TM instruction). So in that case just
1720 		 * send the process a SIGILL immediately.
1721 		 */
1722 		if (!cpu_has_feature(CPU_FTR_TM))
1723 			goto out;
1724 
1725 		tm_unavailable(regs);
1726 		return;
1727 	}
1728 
1729 	pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
1730 		hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);
1731 
1732 out:
1733 	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1734 }
1735 #endif
1736 
1737 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1738 
1739 void fp_unavailable_tm(struct pt_regs *regs)
1740 {
1741 	/* Note:  This does not handle any kind of FP laziness. */
1742 
1743 	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
1744 		 regs->nip, regs->msr);
1745 
1746         /* We can only have got here if the task started using FP after
1747          * beginning the transaction.  So, the transactional regs are just a
1748          * copy of the checkpointed ones.  But, we still need to recheckpoint
1749          * as we're enabling FP for the process; it will return, abort the
1750          * transaction, and probably retry but now with FP enabled.  So the
1751          * checkpointed FP registers need to be loaded.
1752 	 */
1753 	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1754 	/* Reclaim didn't save out any FPRs to transact_fprs. */
1755 
1756 	/* Enable FP for the task: */
1757 	current->thread.load_fp = 1;
1758 
1759 	/* This loads and recheckpoints the FP registers from
1760 	 * thread.fpr[].  They will remain in registers after the
1761 	 * checkpoint so we don't need to reload them after.
1762 	 * If VMX is in use, the VRs now hold checkpointed values,
1763 	 * so we don't want to load the VRs from the thread_struct.
1764 	 */
1765 	tm_recheckpoint(&current->thread);
1766 }
1767 
1768 void altivec_unavailable_tm(struct pt_regs *regs)
1769 {
1770 	/* See the comments in fp_unavailable_tm().  This function operates
1771 	 * the same way.
1772 	 */
1773 
1774 	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
1775 		 "MSR=%lx\n",
1776 		 regs->nip, regs->msr);
1777 	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1778 	current->thread.load_vec = 1;
1779 	tm_recheckpoint(&current->thread);
1780 	current->thread.used_vr = 1;
1781 }
1782 
1783 void vsx_unavailable_tm(struct pt_regs *regs)
1784 {
1785 	/* See the comments in fp_unavailable_tm().  This works similarly,
1786 	 * though we're loading both FP and VEC registers in here.
1787 	 *
1788 	 * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
1789 	 * regs.  Either way, set MSR_VSX.
1790 	 */
1791 
1792 	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
1793 		 "MSR=%lx\n",
1794 		 regs->nip, regs->msr);
1795 
1796 	current->thread.used_vsr = 1;
1797 
1798 	/* This reclaims FP and/or VR regs if they're already enabled */
1799 	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1800 
1801 	current->thread.load_vec = 1;
1802 	current->thread.load_fp = 1;
1803 
1804 	tm_recheckpoint(&current->thread);
1805 }
1806 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1807 
1808 void performance_monitor_exception(struct pt_regs *regs)
1809 {
1810 	__this_cpu_inc(irq_stat.pmu_irqs);
1811 
1812 	perf_irq(regs);
1813 }
1814 
1815 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1816 static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
1817 {
1818 	int changed = 0;
1819 	/*
1820 	 * Determine the cause of the debug event, clear the
1821 	 * event flags and send a trap to the handler. Torez
1822 	 */
1823 	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
1824 		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
1825 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1826 		current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
1827 #endif
1828 		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status,
1829 			     5);
1830 		changed |= 0x01;
1831 	}  else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
1832 		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
1833 		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status,
1834 			     6);
1835 		changed |= 0x01;
1836 	}  else if (debug_status & DBSR_IAC1) {
1837 		current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
1838 		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
1839 		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status,
1840 			     1);
1841 		changed |= 0x01;
1842 	}  else if (debug_status & DBSR_IAC2) {
1843 		current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
1844 		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status,
1845 			     2);
1846 		changed |= 0x01;
1847 	}  else if (debug_status & DBSR_IAC3) {
1848 		current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
1849 		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
1850 		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status,
1851 			     3);
1852 		changed |= 0x01;
1853 	}  else if (debug_status & DBSR_IAC4) {
1854 		current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
1855 		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status,
1856 			     4);
1857 		changed |= 0x01;
1858 	}
1859 	/*
1860 	 * At the point this routine was called, the MSR(DE) was turned off.
1861 	 * Check all other debug flags and see if that bit needs to be turned
1862 	 * back on or not.
1863 	 */
1864 	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
1865 			       current->thread.debug.dbcr1))
1866 		regs->msr |= MSR_DE;
1867 	else
1868 		/* Make sure the IDM flag is off */
1869 		current->thread.debug.dbcr0 &= ~DBCR0_IDM;
1870 
1871 	if (changed & 0x01)
1872 		mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
1873 }
1874 
1875 void DebugException(struct pt_regs *regs, unsigned long debug_status)
1876 {
1877 	current->thread.debug.dbsr = debug_status;
1878 
1879 	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
1880 	 * on server, it stops on the target of the branch. In order to simulate
1881 	 * the server behaviour, we thus restart right away with a single step
1882 	 * instead of stopping here when hitting a BT
1883 	 * instead of stopping here when hitting a BT.
1884 	if (debug_status & DBSR_BT) {
1885 		regs->msr &= ~MSR_DE;
1886 
1887 		/* Disable BT */
1888 		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
1889 		/* Clear the BT event */
1890 		mtspr(SPRN_DBSR, DBSR_BT);
1891 
1892 		/* Do the single step trick only when coming from userspace */
1893 		if (user_mode(regs)) {
1894 			current->thread.debug.dbcr0 &= ~DBCR0_BT;
1895 			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
1896 			regs->msr |= MSR_DE;
1897 			return;
1898 		}
1899 
1900 		if (kprobe_post_handler(regs))
1901 			return;
1902 
1903 		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
1904 			       5, SIGTRAP) == NOTIFY_STOP) {
1905 			return;
1906 		}
1907 		if (debugger_sstep(regs))
1908 			return;
1909 	} else if (debug_status & DBSR_IC) { 	/* Instruction complete */
1910 		regs->msr &= ~MSR_DE;
1911 
1912 		/* Disable instruction completion */
1913 		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
1914 		/* Clear the instruction completion event */
1915 		mtspr(SPRN_DBSR, DBSR_IC);
1916 
1917 		if (kprobe_post_handler(regs))
1918 			return;
1919 
1920 		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
1921 			       5, SIGTRAP) == NOTIFY_STOP) {
1922 			return;
1923 		}
1924 
1925 		if (debugger_sstep(regs))
1926 			return;
1927 
1928 		if (user_mode(regs)) {
1929 			current->thread.debug.dbcr0 &= ~DBCR0_IC;
1930 			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
1931 					       current->thread.debug.dbcr1))
1932 				regs->msr |= MSR_DE;
1933 			else
1934 				/* Make sure the IDM bit is off */
1935 				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
1936 		}
1937 
1938 		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
1939 	} else
1940 		handle_debug(regs, debug_status);
1941 }
1942 NOKPROBE_SYMBOL(DebugException);
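/*
 * For reference, the single-step arming used above boils down to the
 * following (a sketch distilled from DebugException()/handle_debug(), not a
 * separate helper):
 *
 *	current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;	- arm the
 *								  instruction-complete event
 *	regs->msr |= MSR_DE;					- enable debug interrupts
 *	...
 *	mtspr(SPRN_DBSR, DBSR_IC);	- events latch in DBSR and are cleared
 *					  by writing the status bit back
 */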
1943 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1944 
1945 #if !defined(CONFIG_TAU_INT)
1946 void TAUException(struct pt_regs *regs)
1947 {
1948 	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
1949 	       regs->nip, regs->msr, regs->trap, print_tainted());
1950 }
1951 #endif /* CONFIG_TAU_INT */
1952 
1953 #ifdef CONFIG_ALTIVEC
1954 void altivec_assist_exception(struct pt_regs *regs)
1955 {
1956 	int err;
1957 
1958 	if (!user_mode(regs)) {
1959 		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
1960 		       " at %lx\n", regs->nip);
1961 		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
1962 	}
1963 
1964 	flush_altivec_to_thread(current);
1965 
1966 	PPC_WARN_EMULATED(altivec, regs);
1967 	err = emulate_altivec(regs);
1968 	if (err == 0) {
1969 		regs->nip += 4;		/* skip emulated instruction */
1970 		emulate_single_step(regs);
1971 		return;
1972 	}
1973 
1974 	if (err == -EFAULT) {
1975 		/* got an error reading the instruction */
1976 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1977 	} else {
1978 		/* didn't recognize the instruction */
1979 		/* XXX quick hack for now: set the non-Java bit in the VSCR */
1980 		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
1981 				   "in %s at %lx\n", current->comm, regs->nip);
1982 		current->thread.vr_state.vscr.u[3] |= 0x10000;	/* VSCR[NJ] */
1983 	}
1984 }
1985 #endif /* CONFIG_ALTIVEC */
1986 
1987 #ifdef CONFIG_FSL_BOOKE
1988 void CacheLockingException(struct pt_regs *regs, unsigned long address,
1989 			   unsigned long error_code)
1990 {
1991 	/* We treat cache locking instructions from the user
1992 	 * as privileged operations; in the future we could try to do
1993 	 * something smarter.
1994 	 */
1995 	if (error_code & (ESR_DLK | ESR_ILK))
1996 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1998 }
1999 #endif /* CONFIG_FSL_BOOKE */
2000 
2001 #ifdef CONFIG_SPE
2002 void SPEFloatingPointException(struct pt_regs *regs)
2003 {
2004 	extern int do_spe_mathemu(struct pt_regs *regs);
2005 	unsigned long spefscr;
2006 	int fpexc_mode;
2007 	int code = FPE_FLTUNK;
2008 	int err;
2009 
2010 	flush_spe_to_thread(current);
2011 
2012 	spefscr = current->thread.spefscr;
2013 	fpexc_mode = current->thread.fpexc_mode;
2014 
2015 	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF))
2016 		code = FPE_FLTOVF;
2018 	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND))
2019 		code = FPE_FLTUND;
2021 	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
2022 		code = FPE_FLTDIV;
2023 	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV))
2024 		code = FPE_FLTINV;
2026 	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
2027 		code = FPE_FLTRES;
2028 
2029 	err = do_spe_mathemu(regs);
2030 	if (err == 0) {
2031 		regs->nip += 4;		/* skip emulated instruction */
2032 		emulate_single_step(regs);
2033 		return;
2034 	}
2035 
2036 	if (err == -EFAULT) {
2037 		/* got an error reading the instruction */
2038 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
2039 	} else if (err == -EINVAL) {
2040 		/* didn't recognize the instruction */
2041 		printk(KERN_ERR "unrecognized spe instruction "
2042 		       "in %s at %lx\n", current->comm, regs->nip);
2043 	} else {
2044 		_exception(SIGFPE, regs, code, regs->nip);
2045 	}
2048 }
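
/*
 * Userspace opts in to these SIGFPE deliveries with prctl(PR_SET_FPEXC, ...);
 * the PR_FP_EXC_* bits tested above come from that call (see fpexc_mode in
 * the thread struct).  A rough userspace-side sketch, not kernel code:
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_PRECISE);
 *	... install a SIGFPE handler with sigaction(); siginfo.si_code will be
 *	    one of the FPE_FLT* values chosen above ...
 */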
2049 
2050 void SPEFloatingPointRoundException(struct pt_regs *regs)
2051 {
2052 	extern int speround_handler(struct pt_regs *regs);
2053 	int err;
2054 
2055 	preempt_disable();
2056 	if (regs->msr & MSR_SPE)
2057 		giveup_spe(current);
2058 	preempt_enable();
2059 
2060 	regs->nip -= 4;
2061 	err = speround_handler(regs);
2062 	if (err == 0) {
2063 		regs->nip += 4;		/* skip emulated instruction */
2064 		emulate_single_step(regs);
2065 		return;
2066 	}
2067 
2068 	if (err == -EFAULT) {
2069 		/* got an error reading the instruction */
2070 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
2071 	} else if (err == -EINVAL) {
2072 		/* didn't recognize the instruction */
2073 		printk(KERN_ERR "unrecognized spe instruction "
2074 		       "in %s at %lx\n", current->comm, regs->nip);
2075 	} else {
2076 		_exception(SIGFPE, regs, FPE_FLTUNK, regs->nip);
2077 		return;
2078 	}
2079 }
2080 #endif
2081 
2082 /*
2083  * We enter here if we get an unrecoverable exception, that is, one
2084  * that happened at a point where the RI (recoverable interrupt) bit
2085  * in the MSR is 0.  This indicates that SRR0/1 are live, and that
2086  * we therefore lost state by taking this exception.
2087  */
2088 void unrecoverable_exception(struct pt_regs *regs)
2089 {
2090 	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
2091 	       regs->trap, regs->nip);
2092 	die("Unrecoverable exception", regs, SIGABRT);
2093 }
2094 NOKPROBE_SYMBOL(unrecoverable_exception);
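
/*
 * An exception-exit path that finds MSR[RI] clear would funnel here, e.g.
 * (illustrative only, the actual call sites live in the entry code):
 *
 *	if (!(regs->msr & MSR_RI))
 *		unrecoverable_exception(regs);
 */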
2095 
2096 #if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
2097 /*
2098  * Default handler for a Watchdog exception,
2099  * spins until a reboot occurs
2100  */
2101 void __weak WatchdogHandler(struct pt_regs *regs)
2102 {
2103 	/* Generic WatchdogHandler, implement your own */
2104 	mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
2106 }
2107 
2108 void WatchdogException(struct pt_regs *regs)
2109 {
2110 	printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
2111 	WatchdogHandler(regs);
2112 }
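
/*
 * Platforms may override the weak WatchdogHandler() above.  A minimal,
 * hypothetical override (not an existing board port) might simply
 * acknowledge the event so the watchdog timer can be re-armed:
 *
 *	void WatchdogHandler(struct pt_regs *regs)
 *	{
 *		mtspr(SPRN_TSR, TSR_WIS);	- write back the watchdog
 *						  status bit to clear it
 *	}
 */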
2113 #endif
2114 
2115 /*
2116  * We enter here if we discover during exception entry that we are
2117  * running in supervisor mode with a userspace value in the stack pointer.
2118  */
2119 void kernel_bad_stack(struct pt_regs *regs)
2120 {
2121 	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
2122 	       regs->gpr[1], regs->nip);
2123 	die("Bad kernel stack pointer", regs, SIGABRT);
2124 }
2125 NOKPROBE_SYMBOL(kernel_bad_stack);
2126 
2127 void __init trap_init(void)
2128 {
2129 }
2130 
2132 #ifdef CONFIG_PPC_EMULATED_STATS
2133 
2134 #define WARN_EMULATED_SETUP(type)	.type = { .name = #type }
2135 
2136 struct ppc_emulated ppc_emulated = {
2137 #ifdef CONFIG_ALTIVEC
2138 	WARN_EMULATED_SETUP(altivec),
2139 #endif
2140 	WARN_EMULATED_SETUP(dcba),
2141 	WARN_EMULATED_SETUP(dcbz),
2142 	WARN_EMULATED_SETUP(fp_pair),
2143 	WARN_EMULATED_SETUP(isel),
2144 	WARN_EMULATED_SETUP(mcrxr),
2145 	WARN_EMULATED_SETUP(mfpvr),
2146 	WARN_EMULATED_SETUP(multiple),
2147 	WARN_EMULATED_SETUP(popcntb),
2148 	WARN_EMULATED_SETUP(spe),
2149 	WARN_EMULATED_SETUP(string),
2150 	WARN_EMULATED_SETUP(sync),
2151 	WARN_EMULATED_SETUP(unaligned),
2152 #ifdef CONFIG_MATH_EMULATION
2153 	WARN_EMULATED_SETUP(math),
2154 #endif
2155 #ifdef CONFIG_VSX
2156 	WARN_EMULATED_SETUP(vsx),
2157 #endif
2158 #ifdef CONFIG_PPC64
2159 	WARN_EMULATED_SETUP(mfdscr),
2160 	WARN_EMULATED_SETUP(mtdscr),
2161 	WARN_EMULATED_SETUP(lq_stq),
2162 	WARN_EMULATED_SETUP(lxvw4x),
2163 	WARN_EMULATED_SETUP(lxvh8x),
2164 	WARN_EMULATED_SETUP(lxvd2x),
2165 	WARN_EMULATED_SETUP(lxvb16x),
2166 #endif
2167 };
2168 
2169 u32 ppc_warn_emulated;
2170 
2171 void ppc_warn_emulated_print(const char *type)
2172 {
2173 	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
2174 			    type);
2175 }
2176 
2177 static int __init ppc_warn_emulated_init(void)
2178 {
2179 	struct dentry *dir, *d;
2180 	unsigned int i;
2181 	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
2182 
2183 	if (!powerpc_debugfs_root)
2184 		return -ENODEV;
2185 
2186 	dir = debugfs_create_dir("emulated_instructions",
2187 				 powerpc_debugfs_root);
2188 	if (!dir)
2189 		return -ENOMEM;
2190 
2191 	d = debugfs_create_u32("do_warn", 0644, dir,
2192 			       &ppc_warn_emulated);
2193 	if (!d)
2194 		goto fail;
2195 
2196 	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
2197 		d = debugfs_create_u32(entries[i].name, 0644, dir,
2198 				       (u32 *)&entries[i].val.counter);
2199 		if (!d)
2200 			goto fail;
2201 	}
2202 
2203 	return 0;
2204 
2205 fail:
2206 	debugfs_remove_recursive(dir);
2207 	return -ENOMEM;
2208 }
2209 
2210 device_initcall(ppc_warn_emulated_init);
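
/*
 * With debugfs mounted, the counters registered above normally show up under
 * /sys/kernel/debug/powerpc/emulated_instructions/ (one file per entry in
 * struct ppc_emulated, plus do_warn); the exact location depends on where
 * powerpc_debugfs_root points on a given setup.  Writing a non-zero value to
 * do_warn (backed by ppc_warn_emulated) is what is intended to let
 * PPC_WARN_EMULATED() reach the rate-limited ppc_warn_emulated_print().
 */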
2211 
2212 #endif /* CONFIG_PPC_EMULATED_STATS */
2213