xref: /linux/arch/powerpc/kernel/traps.c (revision 99cd1302327a2ccaedf905e9f6a8d8fd234bd485)
114cf11afSPaul Mackerras /*
214cf11afSPaul Mackerras  *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
3fe04b112SScott Wood  *  Copyright 2007-2010 Freescale Semiconductor, Inc.
414cf11afSPaul Mackerras  *
514cf11afSPaul Mackerras  *  This program is free software; you can redistribute it and/or
614cf11afSPaul Mackerras  *  modify it under the terms of the GNU General Public License
714cf11afSPaul Mackerras  *  as published by the Free Software Foundation; either version
814cf11afSPaul Mackerras  *  2 of the License, or (at your option) any later version.
914cf11afSPaul Mackerras  *
1014cf11afSPaul Mackerras  *  Modified by Cort Dougan (cort@cs.nmt.edu)
1114cf11afSPaul Mackerras  *  and Paul Mackerras (paulus@samba.org)
1214cf11afSPaul Mackerras  */
1314cf11afSPaul Mackerras 
1414cf11afSPaul Mackerras /*
1514cf11afSPaul Mackerras  * This file handles the architecture-dependent parts of hardware exceptions.
1614cf11afSPaul Mackerras  */
1714cf11afSPaul Mackerras 
1814cf11afSPaul Mackerras #include <linux/errno.h>
1914cf11afSPaul Mackerras #include <linux/sched.h>
20b17b0153SIngo Molnar #include <linux/sched/debug.h>
2114cf11afSPaul Mackerras #include <linux/kernel.h>
2214cf11afSPaul Mackerras #include <linux/mm.h>
23*99cd1302SRam Pai #include <linux/pkeys.h>
2414cf11afSPaul Mackerras #include <linux/stddef.h>
2514cf11afSPaul Mackerras #include <linux/unistd.h>
268dad3f92SPaul Mackerras #include <linux/ptrace.h>
2714cf11afSPaul Mackerras #include <linux/user.h>
2814cf11afSPaul Mackerras #include <linux/interrupt.h>
2914cf11afSPaul Mackerras #include <linux/init.h>
308a39b05fSPaul Gortmaker #include <linux/extable.h>
318a39b05fSPaul Gortmaker #include <linux/module.h>	/* print_modules */
328dad3f92SPaul Mackerras #include <linux/prctl.h>
3314cf11afSPaul Mackerras #include <linux/delay.h>
3414cf11afSPaul Mackerras #include <linux/kprobes.h>
35cc532915SMichael Ellerman #include <linux/kexec.h>
365474c120SMichael Hanselmann #include <linux/backlight.h>
3773c9ceabSJeremy Fitzhardinge #include <linux/bug.h>
381eeb66a1SChristoph Hellwig #include <linux/kdebug.h>
3976462232SChristian Dietrich #include <linux/ratelimit.h>
40ba12eedeSLi Zhong #include <linux/context_tracking.h>
415080332cSMichael Neuling #include <linux/smp.h>
4214cf11afSPaul Mackerras 
4380947e7cSGeert Uytterhoeven #include <asm/emulated_ops.h>
4414cf11afSPaul Mackerras #include <asm/pgtable.h>
457c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
467644d581SMichael Ellerman #include <asm/debugfs.h>
4714cf11afSPaul Mackerras #include <asm/io.h>
4886417780SPaul Mackerras #include <asm/machdep.h>
4986417780SPaul Mackerras #include <asm/rtas.h>
50f7f6f4feSDavid Gibson #include <asm/pmc.h>
5114cf11afSPaul Mackerras #include <asm/reg.h>
5214cf11afSPaul Mackerras #ifdef CONFIG_PMAC_BACKLIGHT
5314cf11afSPaul Mackerras #include <asm/backlight.h>
5414cf11afSPaul Mackerras #endif
55dc1c1ca3SStephen Rothwell #ifdef CONFIG_PPC64
5686417780SPaul Mackerras #include <asm/firmware.h>
57dc1c1ca3SStephen Rothwell #include <asm/processor.h>
586ce6c629SMichael Neuling #include <asm/tm.h>
59dc1c1ca3SStephen Rothwell #endif
60c0ce7d08SDavid Wilder #include <asm/kexec.h>
6116c57b36SKumar Gala #include <asm/ppc-opcode.h>
62cce1f106SShaohui Xie #include <asm/rio.h>
63ebaeb5aeSMahesh Salgaonkar #include <asm/fadump.h>
64ae3a197eSDavid Howells #include <asm/switch_to.h>
65f54db641SMichael Neuling #include <asm/tm.h>
66ae3a197eSDavid Howells #include <asm/debug.h>
6742f5b4caSDaniel Axtens #include <asm/asm-prototypes.h>
68fd7bacbcSMahesh Salgaonkar #include <asm/hmi.h>
694e0e3435SHongtao Jia #include <sysdev/fsl_pci.h>
706cc89badSNaveen N. Rao #include <asm/kprobes.h>
71dc1c1ca3SStephen Rothwell 
72da665885SThiago Jung Bauermann #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
735be3492fSAnton Blanchard int (*__debugger)(struct pt_regs *regs) __read_mostly;
745be3492fSAnton Blanchard int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
755be3492fSAnton Blanchard int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
765be3492fSAnton Blanchard int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
775be3492fSAnton Blanchard int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
789422de3eSMichael Neuling int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
795be3492fSAnton Blanchard int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;
8014cf11afSPaul Mackerras 
8114cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger);
8214cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger_ipi);
8314cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger_bpt);
8414cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger_sstep);
8514cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger_iabr_match);
869422de3eSMichael Neuling EXPORT_SYMBOL(__debugger_break_match);
8714cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger_fault_handler);
8814cf11afSPaul Mackerras #endif
8914cf11afSPaul Mackerras 
908b3c34cfSMichael Neuling /* Transactional Memory trap debug */
918b3c34cfSMichael Neuling #ifdef TM_DEBUG_SW
928b3c34cfSMichael Neuling #define TM_DEBUG(x...) printk(KERN_INFO x)
938b3c34cfSMichael Neuling #else
948b3c34cfSMichael Neuling #define TM_DEBUG(x...) do { } while(0)
958b3c34cfSMichael Neuling #endif
968b3c34cfSMichael Neuling 
9714cf11afSPaul Mackerras /*
9814cf11afSPaul Mackerras  * Trap & Exception support
9914cf11afSPaul Mackerras  */
10014cf11afSPaul Mackerras 
1016031d9d9Santon@samba.org #ifdef CONFIG_PMAC_BACKLIGHT
1026031d9d9Santon@samba.org static void pmac_backlight_unblank(void)
1036031d9d9Santon@samba.org {
1046031d9d9Santon@samba.org 	mutex_lock(&pmac_backlight_mutex);
1056031d9d9Santon@samba.org 	if (pmac_backlight) {
1066031d9d9Santon@samba.org 		struct backlight_properties *props;
1076031d9d9Santon@samba.org 
1086031d9d9Santon@samba.org 		props = &pmac_backlight->props;
1096031d9d9Santon@samba.org 		props->brightness = props->max_brightness;
1106031d9d9Santon@samba.org 		props->power = FB_BLANK_UNBLANK;
1116031d9d9Santon@samba.org 		backlight_update_status(pmac_backlight);
1126031d9d9Santon@samba.org 	}
1136031d9d9Santon@samba.org 	mutex_unlock(&pmac_backlight_mutex);
1146031d9d9Santon@samba.org }
1156031d9d9Santon@samba.org #else
1166031d9d9Santon@samba.org static inline void pmac_backlight_unblank(void) { }
1176031d9d9Santon@samba.org #endif
1186031d9d9Santon@samba.org 
1196fcd6baaSNicholas Piggin /*
1206fcd6baaSNicholas Piggin  * If oops/die is expected to crash the machine, return true here.
1216fcd6baaSNicholas Piggin  *
1226fcd6baaSNicholas Piggin  * This should not be expected to be 100% accurate, there may be
1236fcd6baaSNicholas Piggin  * notifiers registered or other unexpected conditions that may bring
1246fcd6baaSNicholas Piggin  * down the kernel. Or if the current process in the kernel is holding
1256fcd6baaSNicholas Piggin  * locks or has other critical state, the kernel may become effectively
1266fcd6baaSNicholas Piggin  * unusable anyway.
1276fcd6baaSNicholas Piggin  */
1286fcd6baaSNicholas Piggin bool die_will_crash(void)
1296fcd6baaSNicholas Piggin {
1306fcd6baaSNicholas Piggin 	if (should_fadump_crash())
1316fcd6baaSNicholas Piggin 		return true;
1326fcd6baaSNicholas Piggin 	if (kexec_should_crash(current))
1336fcd6baaSNicholas Piggin 		return true;
1346fcd6baaSNicholas Piggin 	if (in_interrupt() || panic_on_oops ||
1356fcd6baaSNicholas Piggin 			!current->pid || is_global_init(current))
1366fcd6baaSNicholas Piggin 		return true;
1376fcd6baaSNicholas Piggin 
1386fcd6baaSNicholas Piggin 	return false;
1396fcd6baaSNicholas Piggin }
1406fcd6baaSNicholas Piggin 
141760ca4dcSAnton Blanchard static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
142760ca4dcSAnton Blanchard static int die_owner = -1;
143760ca4dcSAnton Blanchard static unsigned int die_nest_count;
144c0ce7d08SDavid Wilder static int die_counter;
145760ca4dcSAnton Blanchard 
14603465f89SNicholas Piggin static unsigned long oops_begin(struct pt_regs *regs)
147760ca4dcSAnton Blanchard {
148760ca4dcSAnton Blanchard 	int cpu;
14934c2a14fSanton@samba.org 	unsigned long flags;
15014cf11afSPaul Mackerras 
151293e4688Santon@samba.org 	oops_enter();
152293e4688Santon@samba.org 
153760ca4dcSAnton Blanchard 	/* racy, but better than risking deadlock. */
154760ca4dcSAnton Blanchard 	raw_local_irq_save(flags);
155760ca4dcSAnton Blanchard 	cpu = smp_processor_id();
156760ca4dcSAnton Blanchard 	if (!arch_spin_trylock(&die_lock)) {
157760ca4dcSAnton Blanchard 		if (cpu == die_owner)
158760ca4dcSAnton Blanchard 			/* nested oops. should stop eventually */;
159760ca4dcSAnton Blanchard 		else
160760ca4dcSAnton Blanchard 			arch_spin_lock(&die_lock);
161760ca4dcSAnton Blanchard 	}
162760ca4dcSAnton Blanchard 	die_nest_count++;
163760ca4dcSAnton Blanchard 	die_owner = cpu;
16414cf11afSPaul Mackerras 	console_verbose();
16514cf11afSPaul Mackerras 	bust_spinlocks(1);
1666031d9d9Santon@samba.org 	if (machine_is(powermac))
1676031d9d9Santon@samba.org 		pmac_backlight_unblank();
168760ca4dcSAnton Blanchard 	return flags;
16934c2a14fSanton@samba.org }
17003465f89SNicholas Piggin NOKPROBE_SYMBOL(oops_begin);
1715474c120SMichael Hanselmann 
17203465f89SNicholas Piggin static void oops_end(unsigned long flags, struct pt_regs *regs,
173760ca4dcSAnton Blanchard 			       int signr)
174760ca4dcSAnton Blanchard {
17514cf11afSPaul Mackerras 	bust_spinlocks(0);
176373d4d09SRusty Russell 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
177760ca4dcSAnton Blanchard 	die_nest_count--;
17858154c8cSAnton Blanchard 	oops_exit();
17958154c8cSAnton Blanchard 	printk("\n");
1807458e8b2SNicholas Piggin 	if (!die_nest_count) {
181760ca4dcSAnton Blanchard 		/* Nest count reaches zero, release the lock. */
1827458e8b2SNicholas Piggin 		die_owner = -1;
183760ca4dcSAnton Blanchard 		arch_spin_unlock(&die_lock);
1847458e8b2SNicholas Piggin 	}
185760ca4dcSAnton Blanchard 	raw_local_irq_restore(flags);
186cc532915SMichael Ellerman 
187ebaeb5aeSMahesh Salgaonkar 	crash_fadump(regs, "die oops");
188ebaeb5aeSMahesh Salgaonkar 
1894388c9b3SNicholas Piggin 	if (kexec_should_crash(current))
190cc532915SMichael Ellerman 		crash_kexec(regs);
1919b00ac06SAnton Blanchard 
192760ca4dcSAnton Blanchard 	if (!signr)
193760ca4dcSAnton Blanchard 		return;
194760ca4dcSAnton Blanchard 
19558154c8cSAnton Blanchard 	/*
19658154c8cSAnton Blanchard 	 * While our oops output is serialised by a spinlock, output
19758154c8cSAnton Blanchard 	 * from panic() called below can race and corrupt it. If we
19858154c8cSAnton Blanchard 	 * know we are going to panic, delay for 1 second so we have a
19958154c8cSAnton Blanchard 	 * chance to get clean backtraces from all CPUs that are oopsing.
20058154c8cSAnton Blanchard 	 */
20158154c8cSAnton Blanchard 	if (in_interrupt() || panic_on_oops || !current->pid ||
20258154c8cSAnton Blanchard 	    is_global_init(current)) {
20358154c8cSAnton Blanchard 		mdelay(MSEC_PER_SEC);
20458154c8cSAnton Blanchard 	}
20558154c8cSAnton Blanchard 
20614cf11afSPaul Mackerras 	if (in_interrupt())
20714cf11afSPaul Mackerras 		panic("Fatal exception in interrupt");
208cea6a4baSHorms 	if (panic_on_oops)
209012c437dSHorms 		panic("Fatal exception");
210760ca4dcSAnton Blanchard 	do_exit(signr);
211760ca4dcSAnton Blanchard }
21203465f89SNicholas Piggin NOKPROBE_SYMBOL(oops_end);
213cea6a4baSHorms 
21403465f89SNicholas Piggin static int __die(const char *str, struct pt_regs *regs, long err)
215760ca4dcSAnton Blanchard {
216760ca4dcSAnton Blanchard 	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
2172e82ca3cSMichael Ellerman 
2182e82ca3cSMichael Ellerman 	if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
2192e82ca3cSMichael Ellerman 		printk("LE ");
2202e82ca3cSMichael Ellerman 	else
2212e82ca3cSMichael Ellerman 		printk("BE ");
2222e82ca3cSMichael Ellerman 
2231c56cd8eSMichael Ellerman 	if (IS_ENABLED(CONFIG_PREEMPT))
22472c0d9eeSMichael Ellerman 		pr_cont("PREEMPT ");
2251c56cd8eSMichael Ellerman 
2261c56cd8eSMichael Ellerman 	if (IS_ENABLED(CONFIG_SMP))
22772c0d9eeSMichael Ellerman 		pr_cont("SMP NR_CPUS=%d ", NR_CPUS);
2281c56cd8eSMichael Ellerman 
229e7df0d88SJoonsoo Kim 	if (debug_pagealloc_enabled())
23072c0d9eeSMichael Ellerman 		pr_cont("DEBUG_PAGEALLOC ");
2311c56cd8eSMichael Ellerman 
2321c56cd8eSMichael Ellerman 	if (IS_ENABLED(CONFIG_NUMA))
23372c0d9eeSMichael Ellerman 		pr_cont("NUMA ");
2341c56cd8eSMichael Ellerman 
23572c0d9eeSMichael Ellerman 	pr_cont("%s\n", ppc_md.name ? ppc_md.name : "");
236760ca4dcSAnton Blanchard 
237760ca4dcSAnton Blanchard 	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
238760ca4dcSAnton Blanchard 		return 1;
239760ca4dcSAnton Blanchard 
240760ca4dcSAnton Blanchard 	print_modules();
241760ca4dcSAnton Blanchard 	show_regs(regs);
24214cf11afSPaul Mackerras 
24314cf11afSPaul Mackerras 	return 0;
24414cf11afSPaul Mackerras }
24503465f89SNicholas Piggin NOKPROBE_SYMBOL(__die);
24614cf11afSPaul Mackerras 
247760ca4dcSAnton Blanchard void die(const char *str, struct pt_regs *regs, long err)
248760ca4dcSAnton Blanchard {
2496f44b20eSNicholas Piggin 	unsigned long flags;
250760ca4dcSAnton Blanchard 
2516f44b20eSNicholas Piggin 	if (debugger(regs))
2526f44b20eSNicholas Piggin 		return;
2536f44b20eSNicholas Piggin 
2546f44b20eSNicholas Piggin 	flags = oops_begin(regs);
255760ca4dcSAnton Blanchard 	if (__die(str, regs, err))
256760ca4dcSAnton Blanchard 		err = 0;
257760ca4dcSAnton Blanchard 	oops_end(flags, regs, err);
258760ca4dcSAnton Blanchard }
25915770a13SNaveen N. Rao NOKPROBE_SYMBOL(die);
260760ca4dcSAnton Blanchard 
26125baa35bSOleg Nesterov void user_single_step_siginfo(struct task_struct *tsk,
26225baa35bSOleg Nesterov 				struct pt_regs *regs, siginfo_t *info)
26325baa35bSOleg Nesterov {
26425baa35bSOleg Nesterov 	memset(info, 0, sizeof(*info));
26525baa35bSOleg Nesterov 	info->si_signo = SIGTRAP;
26625baa35bSOleg Nesterov 	info->si_code = TRAP_TRACE;
26725baa35bSOleg Nesterov 	info->si_addr = (void __user *)regs->nip;
26825baa35bSOleg Nesterov }
26925baa35bSOleg Nesterov 
270*99cd1302SRam Pai 
271*99cd1302SRam Pai void _exception_pkey(int signr, struct pt_regs *regs, int code,
272*99cd1302SRam Pai 		unsigned long addr, int key)
27314cf11afSPaul Mackerras {
27414cf11afSPaul Mackerras 	siginfo_t info;
275d0c3d534SOlof Johansson 	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
276d0c3d534SOlof Johansson 			"at %08lx nip %08lx lr %08lx code %x\n";
277d0c3d534SOlof Johansson 	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
278d0c3d534SOlof Johansson 			"at %016lx nip %016lx lr %016lx code %x\n";
27914cf11afSPaul Mackerras 
28014cf11afSPaul Mackerras 	if (!user_mode(regs)) {
281760ca4dcSAnton Blanchard 		die("Exception in kernel mode", regs, signr);
28214cf11afSPaul Mackerras 		return;
283760ca4dcSAnton Blanchard 	}
284760ca4dcSAnton Blanchard 
285760ca4dcSAnton Blanchard 	if (show_unhandled_signals && unhandled_signal(current, signr)) {
28676462232SChristian Dietrich 		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
287d0c3d534SOlof Johansson 				   current->comm, current->pid, signr,
288d0c3d534SOlof Johansson 				   addr, regs->nip, regs->link, code);
28914cf11afSPaul Mackerras 	}
29014cf11afSPaul Mackerras 
291a3512b2dSBenjamin Herrenschmidt 	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
2929f2f79e3SBenjamin Herrenschmidt 		local_irq_enable();
2939f2f79e3SBenjamin Herrenschmidt 
29441ab5266SAnanth N Mavinakayanahalli 	current->thread.trap_nr = code;
29514cf11afSPaul Mackerras 	memset(&info, 0, sizeof(info));
29614cf11afSPaul Mackerras 	info.si_signo = signr;
29714cf11afSPaul Mackerras 	info.si_code = code;
29814cf11afSPaul Mackerras 	info.si_addr = (void __user *) addr;
299*99cd1302SRam Pai 	info.si_pkey = key;
300*99cd1302SRam Pai 
30114cf11afSPaul Mackerras 	force_sig_info(signr, &info, current);
30214cf11afSPaul Mackerras }
30314cf11afSPaul Mackerras 
304*99cd1302SRam Pai void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
305*99cd1302SRam Pai {
306*99cd1302SRam Pai 	_exception_pkey(signr, regs, code, addr, 0);
307*99cd1302SRam Pai }
308*99cd1302SRam Pai 
30914cf11afSPaul Mackerras void system_reset_exception(struct pt_regs *regs)
31014cf11afSPaul Mackerras {
3112b4f3ac5SNicholas Piggin 	/*
3122b4f3ac5SNicholas Piggin 	 * Avoid crashes in case of nested NMI exceptions. Recoverability
3132b4f3ac5SNicholas Piggin 	 * is determined by RI and in_nmi.
3142b4f3ac5SNicholas Piggin 	 */
3152b4f3ac5SNicholas Piggin 	bool nested = in_nmi();
3162b4f3ac5SNicholas Piggin 	if (!nested)
3172b4f3ac5SNicholas Piggin 		nmi_enter();
3182b4f3ac5SNicholas Piggin 
319ca41ad43SNicholas Piggin 	__this_cpu_inc(irq_stat.sreset_irqs);
320ca41ad43SNicholas Piggin 
32114cf11afSPaul Mackerras 	/* See if there is any machine-dependent handler to call */
322c902be71SArnd Bergmann 	if (ppc_md.system_reset_exception) {
323c902be71SArnd Bergmann 		if (ppc_md.system_reset_exception(regs))
324c4f3b52cSNicholas Piggin 			goto out;
325c902be71SArnd Bergmann 	}
32614cf11afSPaul Mackerras 
3274388c9b3SNicholas Piggin 	if (debugger(regs))
3284388c9b3SNicholas Piggin 		goto out;
3294388c9b3SNicholas Piggin 
3304388c9b3SNicholas Piggin 	/*
3314388c9b3SNicholas Piggin 	 * A system reset is a request to dump, so we always send
3324388c9b3SNicholas Piggin 	 * it through the crashdump code (if fadump or kdump are
3334388c9b3SNicholas Piggin 	 * registered).
3344388c9b3SNicholas Piggin 	 */
3354388c9b3SNicholas Piggin 	crash_fadump(regs, "System Reset");
3364388c9b3SNicholas Piggin 
3374388c9b3SNicholas Piggin 	crash_kexec(regs);
3384388c9b3SNicholas Piggin 
3394388c9b3SNicholas Piggin 	/*
3404388c9b3SNicholas Piggin 	 * We aren't the primary crash CPU. We need to send it
3414388c9b3SNicholas Piggin 	 * to a holding pattern to avoid it ending up in the panic
3424388c9b3SNicholas Piggin 	 * code.
3434388c9b3SNicholas Piggin 	 */
3444388c9b3SNicholas Piggin 	crash_kexec_secondary(regs);
3454388c9b3SNicholas Piggin 
3464388c9b3SNicholas Piggin 	/*
3474388c9b3SNicholas Piggin 	 * No debugger or crash dump registered, print logs then
3484388c9b3SNicholas Piggin 	 * panic.
3494388c9b3SNicholas Piggin 	 */
3504552d128SNicholas Piggin 	die("System Reset", regs, SIGABRT);
3514388c9b3SNicholas Piggin 
3524388c9b3SNicholas Piggin 	mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */
3534388c9b3SNicholas Piggin 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
3544388c9b3SNicholas Piggin 	nmi_panic(regs, "System Reset");
35514cf11afSPaul Mackerras 
356c4f3b52cSNicholas Piggin out:
357c4f3b52cSNicholas Piggin #ifdef CONFIG_PPC_BOOK3S_64
358c4f3b52cSNicholas Piggin 	BUG_ON(get_paca()->in_nmi == 0);
359c4f3b52cSNicholas Piggin 	if (get_paca()->in_nmi > 1)
3604388c9b3SNicholas Piggin 		nmi_panic(regs, "Unrecoverable nested System Reset");
361c4f3b52cSNicholas Piggin #endif
36214cf11afSPaul Mackerras 	/* Must die if the interrupt is not recoverable */
36314cf11afSPaul Mackerras 	if (!(regs->msr & MSR_RI))
3644388c9b3SNicholas Piggin 		nmi_panic(regs, "Unrecoverable System Reset");
36514cf11afSPaul Mackerras 
3662b4f3ac5SNicholas Piggin 	if (!nested)
3672b4f3ac5SNicholas Piggin 		nmi_exit();
3682b4f3ac5SNicholas Piggin 
36914cf11afSPaul Mackerras 	/* What should we do here? We could issue a shutdown or hard reset. */
37014cf11afSPaul Mackerras }
3711e9b4507SMahesh Salgaonkar 
37214cf11afSPaul Mackerras /*
37314cf11afSPaul Mackerras  * I/O accesses can cause machine checks on powermacs.
37414cf11afSPaul Mackerras  * Check if the NIP corresponds to the address of a sync
37514cf11afSPaul Mackerras  * instruction for which there is an entry in the exception
37614cf11afSPaul Mackerras  * table.
37714cf11afSPaul Mackerras  * Note that the 601 only takes a machine check on TEA
37814cf11afSPaul Mackerras  * (transfer error ack) signal assertion, and does not
37914cf11afSPaul Mackerras  * set any of the top 16 bits of SRR1.
38014cf11afSPaul Mackerras  *  -- paulus.
38114cf11afSPaul Mackerras  */
38214cf11afSPaul Mackerras static inline int check_io_access(struct pt_regs *regs)
38314cf11afSPaul Mackerras {
38468a64357SBenjamin Herrenschmidt #ifdef CONFIG_PPC32
38514cf11afSPaul Mackerras 	unsigned long msr = regs->msr;
38614cf11afSPaul Mackerras 	const struct exception_table_entry *entry;
38714cf11afSPaul Mackerras 	unsigned int *nip = (unsigned int *)regs->nip;
38814cf11afSPaul Mackerras 
38914cf11afSPaul Mackerras 	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
39014cf11afSPaul Mackerras 	    && (entry = search_exception_tables(regs->nip)) != NULL) {
39114cf11afSPaul Mackerras 		/*
39214cf11afSPaul Mackerras 		 * Check that it's a sync instruction, or somewhere
39314cf11afSPaul Mackerras 		 * in the twi; isync; nop sequence that inb/inw/inl uses.
39414cf11afSPaul Mackerras 		 * As the address is in the exception table
39514cf11afSPaul Mackerras 		 * we should be able to read the instr there.
39614cf11afSPaul Mackerras 		 * For the debug message, we look at the preceding
39714cf11afSPaul Mackerras 		 * load or store.
39814cf11afSPaul Mackerras 		 */
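		/*
		 * A rough sketch of the layout this code expects around nip
		 * (the exact sequence emitted by the I/O accessors may vary):
		 *
		 *	l/st..x  rD,0,rB	<- the MMIO access itself
		 *	twi or sync
		 *	isync
		 *	nop
		 *
		 * nip may point at the nop, the isync or the twi/sync, so we
		 * step backwards until we reach the access and pull the port
		 * register (the RB field) out of it for the debug message.
		 */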
399ddc6cd0dSChristophe Leroy 		if (*nip == PPC_INST_NOP)
40014cf11afSPaul Mackerras 			nip -= 2;
401ddc6cd0dSChristophe Leroy 		else if (*nip == PPC_INST_ISYNC)
40214cf11afSPaul Mackerras 			--nip;
403ddc6cd0dSChristophe Leroy 		if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
40414cf11afSPaul Mackerras 			unsigned int rb;
40514cf11afSPaul Mackerras 
40614cf11afSPaul Mackerras 			--nip;
40714cf11afSPaul Mackerras 			rb = (*nip >> 11) & 0x1f;
40814cf11afSPaul Mackerras 			printk(KERN_DEBUG "%s bad port %lx at %p\n",
40914cf11afSPaul Mackerras 			       (*nip & 0x100)? "OUT to": "IN from",
41014cf11afSPaul Mackerras 			       regs->gpr[rb] - _IO_BASE, nip);
41114cf11afSPaul Mackerras 			regs->msr |= MSR_RI;
41261a92f70SNicholas Piggin 			regs->nip = extable_fixup(entry);
41314cf11afSPaul Mackerras 			return 1;
41414cf11afSPaul Mackerras 		}
41514cf11afSPaul Mackerras 	}
41668a64357SBenjamin Herrenschmidt #endif /* CONFIG_PPC32 */
41714cf11afSPaul Mackerras 	return 0;
41814cf11afSPaul Mackerras }
41914cf11afSPaul Mackerras 
420172ae2e7SDave Kleikamp #ifdef CONFIG_PPC_ADV_DEBUG_REGS
42114cf11afSPaul Mackerras /* On 4xx, the reason for the machine check or program exception
42214cf11afSPaul Mackerras    is in the ESR. */
42314cf11afSPaul Mackerras #define get_reason(regs)	((regs)->dsisr)
42414cf11afSPaul Mackerras #define REASON_FP		ESR_FP
42514cf11afSPaul Mackerras #define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
42614cf11afSPaul Mackerras #define REASON_PRIVILEGED	ESR_PPR
42714cf11afSPaul Mackerras #define REASON_TRAP		ESR_PTR
42814cf11afSPaul Mackerras 
42914cf11afSPaul Mackerras /* single-step stuff */
43051ae8d4aSBharat Bhushan #define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
43151ae8d4aSBharat Bhushan #define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)
43214cf11afSPaul Mackerras 
43314cf11afSPaul Mackerras #else
43414cf11afSPaul Mackerras /* On non-4xx, the reason for the machine check or program
43514cf11afSPaul Mackerras    exception is in the MSR. */
43614cf11afSPaul Mackerras #define get_reason(regs)	((regs)->msr)
437d30a5a52SMichael Ellerman #define REASON_TM		SRR1_PROGTM
438d30a5a52SMichael Ellerman #define REASON_FP		SRR1_PROGFPE
439d30a5a52SMichael Ellerman #define REASON_ILLEGAL		SRR1_PROGILL
440d30a5a52SMichael Ellerman #define REASON_PRIVILEGED	SRR1_PROGPRIV
441d30a5a52SMichael Ellerman #define REASON_TRAP		SRR1_PROGTRAP
44214cf11afSPaul Mackerras 
44314cf11afSPaul Mackerras #define single_stepping(regs)	((regs)->msr & MSR_SE)
44414cf11afSPaul Mackerras #define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
44514cf11afSPaul Mackerras #endif
44614cf11afSPaul Mackerras 
4470d0935b3SMichael Ellerman #if defined(CONFIG_E500)
448fe04b112SScott Wood int machine_check_e500mc(struct pt_regs *regs)
449fe04b112SScott Wood {
450fe04b112SScott Wood 	unsigned long mcsr = mfspr(SPRN_MCSR);
451a4e89ffbSMatt Weber 	unsigned long pvr = mfspr(SPRN_PVR);
452fe04b112SScott Wood 	unsigned long reason = mcsr;
453fe04b112SScott Wood 	int recoverable = 1;
454fe04b112SScott Wood 
45582a9a480SScott Wood 	if (reason & MCSR_LD) {
456cce1f106SShaohui Xie 		recoverable = fsl_rio_mcheck_exception(regs);
457cce1f106SShaohui Xie 		if (recoverable == 1)
458cce1f106SShaohui Xie 			goto silent_out;
459cce1f106SShaohui Xie 	}
460cce1f106SShaohui Xie 
461fe04b112SScott Wood 	printk("Machine check in kernel mode.\n");
462fe04b112SScott Wood 	printk("Caused by (from MCSR=%lx): ", reason);
463fe04b112SScott Wood 
464fe04b112SScott Wood 	if (reason & MCSR_MCP)
465fe04b112SScott Wood 		printk("Machine Check Signal\n");
466fe04b112SScott Wood 
467fe04b112SScott Wood 	if (reason & MCSR_ICPERR) {
468fe04b112SScott Wood 		printk("Instruction Cache Parity Error\n");
469fe04b112SScott Wood 
470fe04b112SScott Wood 		/*
471fe04b112SScott Wood 		 * This is recoverable by invalidating the i-cache.
472fe04b112SScott Wood 		 */
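		/*
		 * Setting ICFI requests a flash invalidate of the i-cache;
		 * the bit is expected to clear again once the invalidate has
		 * completed, which is what the loop below polls for.
		 */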
473fe04b112SScott Wood 		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
474fe04b112SScott Wood 		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
475fe04b112SScott Wood 			;
476fe04b112SScott Wood 
477fe04b112SScott Wood 		/*
478fe04b112SScott Wood 		 * This will generally be accompanied by an instruction
479fe04b112SScott Wood 		 * fetch error report -- only treat MCSR_IF as fatal
480fe04b112SScott Wood 		 * if it wasn't due to an L1 parity error.
481fe04b112SScott Wood 		 */
482fe04b112SScott Wood 		reason &= ~MCSR_IF;
483fe04b112SScott Wood 	}
484fe04b112SScott Wood 
485fe04b112SScott Wood 	if (reason & MCSR_DCPERR_MC) {
486fe04b112SScott Wood 		printk("Data Cache Parity Error\n");
48737caf9f2SKumar Gala 
48837caf9f2SKumar Gala 		/*
48937caf9f2SKumar Gala 		 * In write shadow mode we auto-recover from the error, but it
49037caf9f2SKumar Gala 		 * may still get logged and cause a machine check.  We should
49137caf9f2SKumar Gala 		 * only treat the non-write shadow case as non-recoverable.
49237caf9f2SKumar Gala 		 */
493a4e89ffbSMatt Weber 		/* On the e6500 core, the L1 DCWS (data cache write shadow mode)
494a4e89ffbSMatt Weber 		 * bit is not implemented, but the L1 data cache always runs in
495a4e89ffbSMatt Weber 		 * write shadow mode. Hence, on data cache parity errors, HW will
496a4e89ffbSMatt Weber 		 * automatically invalidate the L1 data cache.
497a4e89ffbSMatt Weber 		 */
498a4e89ffbSMatt Weber 		if (PVR_VER(pvr) != PVR_VER_E6500) {
49937caf9f2SKumar Gala 			if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
500fe04b112SScott Wood 				recoverable = 0;
501fe04b112SScott Wood 		}
502a4e89ffbSMatt Weber 	}
503fe04b112SScott Wood 
504fe04b112SScott Wood 	if (reason & MCSR_L2MMU_MHIT) {
505fe04b112SScott Wood 		printk("Hit on multiple TLB entries\n");
506fe04b112SScott Wood 		recoverable = 0;
507fe04b112SScott Wood 	}
508fe04b112SScott Wood 
509fe04b112SScott Wood 	if (reason & MCSR_NMI)
510fe04b112SScott Wood 		printk("Non-maskable interrupt\n");
511fe04b112SScott Wood 
512fe04b112SScott Wood 	if (reason & MCSR_IF) {
513fe04b112SScott Wood 		printk("Instruction Fetch Error Report\n");
514fe04b112SScott Wood 		recoverable = 0;
515fe04b112SScott Wood 	}
516fe04b112SScott Wood 
517fe04b112SScott Wood 	if (reason & MCSR_LD) {
518fe04b112SScott Wood 		printk("Load Error Report\n");
519fe04b112SScott Wood 		recoverable = 0;
520fe04b112SScott Wood 	}
521fe04b112SScott Wood 
522fe04b112SScott Wood 	if (reason & MCSR_ST) {
523fe04b112SScott Wood 		printk("Store Error Report\n");
524fe04b112SScott Wood 		recoverable = 0;
525fe04b112SScott Wood 	}
526fe04b112SScott Wood 
527fe04b112SScott Wood 	if (reason & MCSR_LDG) {
528fe04b112SScott Wood 		printk("Guarded Load Error Report\n");
529fe04b112SScott Wood 		recoverable = 0;
530fe04b112SScott Wood 	}
531fe04b112SScott Wood 
532fe04b112SScott Wood 	if (reason & MCSR_TLBSYNC)
533fe04b112SScott Wood 		printk("Simultaneous tlbsync operations\n");
534fe04b112SScott Wood 
535fe04b112SScott Wood 	if (reason & MCSR_BSL2_ERR) {
536fe04b112SScott Wood 		printk("Level 2 Cache Error\n");
537fe04b112SScott Wood 		recoverable = 0;
538fe04b112SScott Wood 	}
539fe04b112SScott Wood 
540fe04b112SScott Wood 	if (reason & MCSR_MAV) {
541fe04b112SScott Wood 		u64 addr;
542fe04b112SScott Wood 
543fe04b112SScott Wood 		addr = mfspr(SPRN_MCAR);
544fe04b112SScott Wood 		addr |= (u64)mfspr(SPRN_MCARU) << 32;
545fe04b112SScott Wood 
546fe04b112SScott Wood 		printk("Machine Check %s Address: %#llx\n",
547fe04b112SScott Wood 		       reason & MCSR_MEA ? "Effective" : "Physical", addr);
548fe04b112SScott Wood 	}
549fe04b112SScott Wood 
550cce1f106SShaohui Xie silent_out:
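	/*
	 * Writing the accumulated MCSR value back is expected to clear the
	 * bits we have just reported; only claim the machine check was
	 * recovered if nothing is left set and no fatal source was seen.
	 */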
551fe04b112SScott Wood 	mtspr(SPRN_MCSR, mcsr);
552fe04b112SScott Wood 	return mfspr(SPRN_MCSR) == 0 && recoverable;
553fe04b112SScott Wood }
554fe04b112SScott Wood 
55547c0bd1aSBenjamin Herrenschmidt int machine_check_e500(struct pt_regs *regs)
55647c0bd1aSBenjamin Herrenschmidt {
55742bff234SMichael Ellerman 	unsigned long reason = mfspr(SPRN_MCSR);
55847c0bd1aSBenjamin Herrenschmidt 
559cce1f106SShaohui Xie 	if (reason & MCSR_BUS_RBERR) {
560cce1f106SShaohui Xie 		if (fsl_rio_mcheck_exception(regs))
561cce1f106SShaohui Xie 			return 1;
5624e0e3435SHongtao Jia 		if (fsl_pci_mcheck_exception(regs))
5634e0e3435SHongtao Jia 			return 1;
564cce1f106SShaohui Xie 	}
565cce1f106SShaohui Xie 
56614cf11afSPaul Mackerras 	printk("Machine check in kernel mode.\n");
56714cf11afSPaul Mackerras 	printk("Caused by (from MCSR=%lx): ", reason);
56814cf11afSPaul Mackerras 
56914cf11afSPaul Mackerras 	if (reason & MCSR_MCP)
57014cf11afSPaul Mackerras 		printk("Machine Check Signal\n");
57114cf11afSPaul Mackerras 	if (reason & MCSR_ICPERR)
57214cf11afSPaul Mackerras 		printk("Instruction Cache Parity Error\n");
57314cf11afSPaul Mackerras 	if (reason & MCSR_DCP_PERR)
57414cf11afSPaul Mackerras 		printk("Data Cache Push Parity Error\n");
57514cf11afSPaul Mackerras 	if (reason & MCSR_DCPERR)
57614cf11afSPaul Mackerras 		printk("Data Cache Parity Error\n");
57714cf11afSPaul Mackerras 	if (reason & MCSR_BUS_IAERR)
57814cf11afSPaul Mackerras 		printk("Bus - Instruction Address Error\n");
57914cf11afSPaul Mackerras 	if (reason & MCSR_BUS_RAERR)
58014cf11afSPaul Mackerras 		printk("Bus - Read Address Error\n");
58114cf11afSPaul Mackerras 	if (reason & MCSR_BUS_WAERR)
58214cf11afSPaul Mackerras 		printk("Bus - Write Address Error\n");
58314cf11afSPaul Mackerras 	if (reason & MCSR_BUS_IBERR)
58414cf11afSPaul Mackerras 		printk("Bus - Instruction Data Error\n");
58514cf11afSPaul Mackerras 	if (reason & MCSR_BUS_RBERR)
58614cf11afSPaul Mackerras 		printk("Bus - Read Data Bus Error\n");
58714cf11afSPaul Mackerras 	if (reason & MCSR_BUS_WBERR)
588c1528339SWladislav Wiebe 		printk("Bus - Write Data Bus Error\n");
58914cf11afSPaul Mackerras 	if (reason & MCSR_BUS_IPERR)
59014cf11afSPaul Mackerras 		printk("Bus - Instruction Parity Error\n");
59114cf11afSPaul Mackerras 	if (reason & MCSR_BUS_RPERR)
59214cf11afSPaul Mackerras 		printk("Bus - Read Parity Error\n");
59347c0bd1aSBenjamin Herrenschmidt 
59447c0bd1aSBenjamin Herrenschmidt 	return 0;
59547c0bd1aSBenjamin Herrenschmidt }
5964490c06bSKumar Gala 
5974490c06bSKumar Gala int machine_check_generic(struct pt_regs *regs)
5984490c06bSKumar Gala {
5994490c06bSKumar Gala 	return 0;
6004490c06bSKumar Gala }
60114cf11afSPaul Mackerras #elif defined(CONFIG_E200)
60247c0bd1aSBenjamin Herrenschmidt int machine_check_e200(struct pt_regs *regs)
60347c0bd1aSBenjamin Herrenschmidt {
60442bff234SMichael Ellerman 	unsigned long reason = mfspr(SPRN_MCSR);
60547c0bd1aSBenjamin Herrenschmidt 
60614cf11afSPaul Mackerras 	printk("Machine check in kernel mode.\n");
60714cf11afSPaul Mackerras 	printk("Caused by (from MCSR=%lx): ", reason);
60814cf11afSPaul Mackerras 
60914cf11afSPaul Mackerras 	if (reason & MCSR_MCP)
61014cf11afSPaul Mackerras 		printk("Machine Check Signal\n");
61114cf11afSPaul Mackerras 	if (reason & MCSR_CP_PERR)
61214cf11afSPaul Mackerras 		printk("Cache Push Parity Error\n");
61314cf11afSPaul Mackerras 	if (reason & MCSR_CPERR)
61414cf11afSPaul Mackerras 		printk("Cache Parity Error\n");
61514cf11afSPaul Mackerras 	if (reason & MCSR_EXCP_ERR)
61614cf11afSPaul Mackerras 		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
61714cf11afSPaul Mackerras 	if (reason & MCSR_BUS_IRERR)
61814cf11afSPaul Mackerras 		printk("Bus - Read Bus Error on instruction fetch\n");
61914cf11afSPaul Mackerras 	if (reason & MCSR_BUS_DRERR)
62014cf11afSPaul Mackerras 		printk("Bus - Read Bus Error on data load\n");
62114cf11afSPaul Mackerras 	if (reason & MCSR_BUS_WRERR)
62214cf11afSPaul Mackerras 		printk("Bus - Write Bus Error on buffered store or cache line push\n");
62347c0bd1aSBenjamin Herrenschmidt 
62447c0bd1aSBenjamin Herrenschmidt 	return 0;
62547c0bd1aSBenjamin Herrenschmidt }
6267f3f819eSMichael Ellerman #elif defined(CONFIG_PPC32)
62747c0bd1aSBenjamin Herrenschmidt int machine_check_generic(struct pt_regs *regs)
62847c0bd1aSBenjamin Herrenschmidt {
62942bff234SMichael Ellerman 	unsigned long reason = regs->msr;
63047c0bd1aSBenjamin Herrenschmidt 
63114cf11afSPaul Mackerras 	printk("Machine check in kernel mode.\n");
63214cf11afSPaul Mackerras 	printk("Caused by (from SRR1=%lx): ", reason);
63314cf11afSPaul Mackerras 	switch (reason & 0x601F0000) {
63414cf11afSPaul Mackerras 	case 0x80000:
63514cf11afSPaul Mackerras 		printk("Machine check signal\n");
63614cf11afSPaul Mackerras 		break;
63714cf11afSPaul Mackerras 	case 0:		/* for 601 */
63814cf11afSPaul Mackerras 	case 0x40000:
63914cf11afSPaul Mackerras 	case 0x140000:	/* 7450 MSS error and TEA */
64014cf11afSPaul Mackerras 		printk("Transfer error ack signal\n");
64114cf11afSPaul Mackerras 		break;
64214cf11afSPaul Mackerras 	case 0x20000:
64314cf11afSPaul Mackerras 		printk("Data parity error signal\n");
64414cf11afSPaul Mackerras 		break;
64514cf11afSPaul Mackerras 	case 0x10000:
64614cf11afSPaul Mackerras 		printk("Address parity error signal\n");
64714cf11afSPaul Mackerras 		break;
64814cf11afSPaul Mackerras 	case 0x20000000:
64914cf11afSPaul Mackerras 		printk("L1 Data Cache error\n");
65014cf11afSPaul Mackerras 		break;
65114cf11afSPaul Mackerras 	case 0x40000000:
65214cf11afSPaul Mackerras 		printk("L1 Instruction Cache error\n");
65314cf11afSPaul Mackerras 		break;
65414cf11afSPaul Mackerras 	case 0x00100000:
65514cf11afSPaul Mackerras 		printk("L2 data cache parity error\n");
65614cf11afSPaul Mackerras 		break;
65714cf11afSPaul Mackerras 	default:
65814cf11afSPaul Mackerras 		printk("Unknown values in msr\n");
65914cf11afSPaul Mackerras 	}
66075918a4bSOlof Johansson 	return 0;
66175918a4bSOlof Johansson }
66247c0bd1aSBenjamin Herrenschmidt #endif /* everything else */
66375918a4bSOlof Johansson 
66475918a4bSOlof Johansson void machine_check_exception(struct pt_regs *regs)
66575918a4bSOlof Johansson {
66675918a4bSOlof Johansson 	int recover = 0;
667b96672ddSNicholas Piggin 	bool nested = in_nmi();
668b96672ddSNicholas Piggin 	if (!nested)
669b96672ddSNicholas Piggin 		nmi_enter();
67075918a4bSOlof Johansson 
671f886f0f6SNicholas Piggin 	/* 64s accounts the mce in machine_check_early when in HVMODE */
672f886f0f6SNicholas Piggin 	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !cpu_has_feature(CPU_FTR_HVMODE))
67369111bacSChristoph Lameter 		__this_cpu_inc(irq_stat.mce_exceptions);
67489713ed1SAnton Blanchard 
675d93b0ac0SMahesh Salgaonkar 	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
676d93b0ac0SMahesh Salgaonkar 
67747c0bd1aSBenjamin Herrenschmidt 	/* See if there is any machine-dependent handler to call. In theory,
67847c0bd1aSBenjamin Herrenschmidt 	 * we would want to call the CPU handler first, and only call the
67947c0bd1aSBenjamin Herrenschmidt 	 * ppc_md one if the CPU handler returns a positive number. However,
68047c0bd1aSBenjamin Herrenschmidt 	 * there is existing code that assumes the board gets the first
68147c0bd1aSBenjamin Herrenschmidt 	 * chance, so let's keep it that way for now and fix things later. --BenH.
68247c0bd1aSBenjamin Herrenschmidt 	 */
68375918a4bSOlof Johansson 	if (ppc_md.machine_check_exception)
68475918a4bSOlof Johansson 		recover = ppc_md.machine_check_exception(regs);
68547c0bd1aSBenjamin Herrenschmidt 	else if (cur_cpu_spec->machine_check)
68647c0bd1aSBenjamin Herrenschmidt 		recover = cur_cpu_spec->machine_check(regs);
68775918a4bSOlof Johansson 
68847c0bd1aSBenjamin Herrenschmidt 	if (recover > 0)
689ba12eedeSLi Zhong 		goto bail;
69075918a4bSOlof Johansson 
691a443506bSAnton Blanchard 	if (debugger_fault_handler(regs))
692ba12eedeSLi Zhong 		goto bail;
69375918a4bSOlof Johansson 
69475918a4bSOlof Johansson 	if (check_io_access(regs))
695ba12eedeSLi Zhong 		goto bail;
69675918a4bSOlof Johansson 
6978dad3f92SPaul Mackerras 	die("Machine check", regs, SIGBUS);
69814cf11afSPaul Mackerras 
69914cf11afSPaul Mackerras 	/* Must die if the interrupt is not recoverable */
70014cf11afSPaul Mackerras 	if (!(regs->msr & MSR_RI))
701b96672ddSNicholas Piggin 		nmi_panic(regs, "Unrecoverable Machine check");
702ba12eedeSLi Zhong 
703ba12eedeSLi Zhong bail:
704b96672ddSNicholas Piggin 	if (!nested)
705b96672ddSNicholas Piggin 		nmi_exit();
70614cf11afSPaul Mackerras }
70714cf11afSPaul Mackerras 
70814cf11afSPaul Mackerras void SMIException(struct pt_regs *regs)
70914cf11afSPaul Mackerras {
71014cf11afSPaul Mackerras 	die("System Management Interrupt", regs, SIGABRT);
71114cf11afSPaul Mackerras }
71214cf11afSPaul Mackerras 
7135080332cSMichael Neuling #ifdef CONFIG_VSX
7145080332cSMichael Neuling static void p9_hmi_special_emu(struct pt_regs *regs)
7155080332cSMichael Neuling {
7165080332cSMichael Neuling 	unsigned int ra, rb, t, i, sel, instr, rc;
7175080332cSMichael Neuling 	const void __user *addr;
7185080332cSMichael Neuling 	u8 vbuf[16], *vdst;
7195080332cSMichael Neuling 	unsigned long ea, msr, msr_mask;
7205080332cSMichael Neuling 	bool swap;
7215080332cSMichael Neuling 
7225080332cSMichael Neuling 	if (__get_user_inatomic(instr, (unsigned int __user *)regs->nip))
7235080332cSMichael Neuling 		return;
7245080332cSMichael Neuling 
7255080332cSMichael Neuling 	/*
7265080332cSMichael Neuling 	 * lxvb16x	opcode: 0x7c0006d8
7275080332cSMichael Neuling 	 * lxvd2x	opcode: 0x7c000698
7285080332cSMichael Neuling 	 * lxvh8x	opcode: 0x7c000658
7295080332cSMichael Neuling 	 * lxvw4x	opcode: 0x7c000618
7305080332cSMichael Neuling 	 */
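	/*
	 * The four opcodes above differ only in the two 0x40/0x80 bits of
	 * the extended opcode (the "sel" field decoded further down) and in
	 * the low TX bit that selects VSRs 32-63, so the 0xfc00073e mask
	 * ignores those plus the register fields, and a compare against the
	 * lxvw4x pattern matches any of the four forms.
	 */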
7315080332cSMichael Neuling 	if ((instr & 0xfc00073e) != 0x7c000618) {
7325080332cSMichael Neuling 		pr_devel("HMI vec emu: not vector CI %i:%s[%d] nip=%016lx"
7335080332cSMichael Neuling 			 " instr=%08x\n",
7345080332cSMichael Neuling 			 smp_processor_id(), current->comm, current->pid,
7355080332cSMichael Neuling 			 regs->nip, instr);
7365080332cSMichael Neuling 		return;
7375080332cSMichael Neuling 	}
7385080332cSMichael Neuling 
7395080332cSMichael Neuling 	/* Grab vector registers into the task struct */
7405080332cSMichael Neuling 	msr = regs->msr; /* Grab msr before we flush the bits */
7415080332cSMichael Neuling 	flush_vsx_to_thread(current);
7425080332cSMichael Neuling 	enable_kernel_altivec();
7435080332cSMichael Neuling 
7445080332cSMichael Neuling 	/*
7455080332cSMichael Neuling 	 * Is userspace running with a different endianness? (This is rare
7465080332cSMichael Neuling 	 * but not impossible.)
7475080332cSMichael Neuling 	 */
7485080332cSMichael Neuling 	swap = (msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
7495080332cSMichael Neuling 
7505080332cSMichael Neuling 	/* Decode the instruction */
7515080332cSMichael Neuling 	ra = (instr >> 16) & 0x1f;
7525080332cSMichael Neuling 	rb = (instr >> 11) & 0x1f;
7535080332cSMichael Neuling 	t = (instr >> 21) & 0x1f;
7545080332cSMichael Neuling 	if (instr & 1)
7555080332cSMichael Neuling 		vdst = (u8 *)&current->thread.vr_state.vr[t];
7565080332cSMichael Neuling 	else
7575080332cSMichael Neuling 		vdst = (u8 *)&current->thread.fp_state.fpr[t][0];
7585080332cSMichael Neuling 
7595080332cSMichael Neuling 	/* Grab the vector address */
7605080332cSMichael Neuling 	ea = regs->gpr[rb] + (ra ? regs->gpr[ra] : 0);
7615080332cSMichael Neuling 	if (is_32bit_task())
7625080332cSMichael Neuling 		ea &= 0xfffffffful;
7635080332cSMichael Neuling 	addr = (__force const void __user *)ea;
7645080332cSMichael Neuling 
7655080332cSMichael Neuling 	/* Check it */
7665080332cSMichael Neuling 	if (!access_ok(VERIFY_READ, addr, 16)) {
7675080332cSMichael Neuling 		pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx"
7685080332cSMichael Neuling 			 " instr=%08x addr=%016lx\n",
7695080332cSMichael Neuling 			 smp_processor_id(), current->comm, current->pid,
7705080332cSMichael Neuling 			 regs->nip, instr, (unsigned long)addr);
7715080332cSMichael Neuling 		return;
7725080332cSMichael Neuling 	}
7735080332cSMichael Neuling 
7745080332cSMichael Neuling 	/* Read the vector */
7755080332cSMichael Neuling 	rc = 0;
7765080332cSMichael Neuling 	if ((unsigned long)addr & 0xfUL)
7775080332cSMichael Neuling 		/* unaligned case */
7785080332cSMichael Neuling 		rc = __copy_from_user_inatomic(vbuf, addr, 16);
7795080332cSMichael Neuling 	else
7805080332cSMichael Neuling 		__get_user_atomic_128_aligned(vbuf, addr, rc);
7815080332cSMichael Neuling 	if (rc) {
7825080332cSMichael Neuling 		pr_devel("HMI vec emu: page fault %i:%s[%d] nip=%016lx"
7835080332cSMichael Neuling 			 " instr=%08x addr=%016lx\n",
7845080332cSMichael Neuling 			 smp_processor_id(), current->comm, current->pid,
7855080332cSMichael Neuling 			 regs->nip, instr, (unsigned long)addr);
7865080332cSMichael Neuling 		return;
7875080332cSMichael Neuling 	}
7885080332cSMichael Neuling 
7895080332cSMichael Neuling 	pr_devel("HMI vec emu: emulated vector CI %i:%s[%d] nip=%016lx"
7905080332cSMichael Neuling 		 " instr=%08x addr=%016lx\n",
7915080332cSMichael Neuling 		 smp_processor_id(), current->comm, current->pid, regs->nip,
7925080332cSMichael Neuling 		 instr, (unsigned long) addr);
7935080332cSMichael Neuling 
7945080332cSMichael Neuling 	/* Grab instruction "selector" */
7955080332cSMichael Neuling 	sel = (instr >> 6) & 3;
7965080332cSMichael Neuling 
7975080332cSMichael Neuling 	/*
7985080332cSMichael Neuling 	 * Check to make sure the facility is actually enabled. This
7995080332cSMichael Neuling 	 * could happen if we get a false positive hit.
8005080332cSMichael Neuling 	 *
8015080332cSMichael Neuling 	 * lxvd2x/lxvw4x always check MSR VSX sel = 0,2
8025080332cSMichael Neuling 	 * lxvh8x/lxvb16x check MSR VSX or VEC depending on VSR used sel = 1,3
8035080332cSMichael Neuling 	 */
8045080332cSMichael Neuling 	msr_mask = MSR_VSX;
8055080332cSMichael Neuling 	if ((sel & 1) && (instr & 1)) /* lxvh8x & lxvb16x + VSR >= 32 */
8065080332cSMichael Neuling 		msr_mask = MSR_VEC;
8075080332cSMichael Neuling 	if (!(msr & msr_mask)) {
8085080332cSMichael Neuling 		pr_devel("HMI vec emu: MSR fac clear %i:%s[%d] nip=%016lx"
8095080332cSMichael Neuling 			 " instr=%08x msr:%016lx\n",
8105080332cSMichael Neuling 			 smp_processor_id(), current->comm, current->pid,
8115080332cSMichael Neuling 			 regs->nip, instr, msr);
8125080332cSMichael Neuling 		return;
8135080332cSMichael Neuling 	}
8145080332cSMichael Neuling 
8155080332cSMichael Neuling 	/* Do logging here before we modify sel based on endian */
8165080332cSMichael Neuling 	switch (sel) {
8175080332cSMichael Neuling 	case 0:	/* lxvw4x */
8185080332cSMichael Neuling 		PPC_WARN_EMULATED(lxvw4x, regs);
8195080332cSMichael Neuling 		break;
8205080332cSMichael Neuling 	case 1: /* lxvh8x */
8215080332cSMichael Neuling 		PPC_WARN_EMULATED(lxvh8x, regs);
8225080332cSMichael Neuling 		break;
8235080332cSMichael Neuling 	case 2: /* lxvd2x */
8245080332cSMichael Neuling 		PPC_WARN_EMULATED(lxvd2x, regs);
8255080332cSMichael Neuling 		break;
8265080332cSMichael Neuling 	case 3: /* lxvb16x */
8275080332cSMichael Neuling 		PPC_WARN_EMULATED(lxvb16x, regs);
8285080332cSMichael Neuling 		break;
8295080332cSMichael Neuling 	}
8305080332cSMichael Neuling 
8315080332cSMichael Neuling #ifdef __LITTLE_ENDIAN__
8325080332cSMichael Neuling 	/*
8335080332cSMichael Neuling 	 * An LE kernel stores the vector in the task struct as an LE
8345080332cSMichael Neuling 	 * byte array (effectively swapping both the components and
8355080332cSMichael Neuling 	 * the content of the components). Those instructions expect
8365080332cSMichael Neuling 	 * the components to remain in ascending address order, so we
8375080332cSMichael Neuling 	 * swap them back.
8385080332cSMichael Neuling 	 *
8395080332cSMichael Neuling 	 * If we are running a BE user space, the expectation is that
8405080332cSMichael Neuling 	 * of a simple memcpy, so forcing the emulation to look like
8415080332cSMichael Neuling 	 * a lxvb16x should do the trick.
8425080332cSMichael Neuling 	 */
8435080332cSMichael Neuling 	if (swap)
8445080332cSMichael Neuling 		sel = 3;
8455080332cSMichael Neuling 
8465080332cSMichael Neuling 	switch (sel) {
8475080332cSMichael Neuling 	case 0:	/* lxvw4x */
8485080332cSMichael Neuling 		for (i = 0; i < 4; i++)
8495080332cSMichael Neuling 			((u32 *)vdst)[i] = ((u32 *)vbuf)[3-i];
8505080332cSMichael Neuling 		break;
8515080332cSMichael Neuling 	case 1: /* lxvh8x */
8525080332cSMichael Neuling 		for (i = 0; i < 8; i++)
8535080332cSMichael Neuling 			((u16 *)vdst)[i] = ((u16 *)vbuf)[7-i];
8545080332cSMichael Neuling 		break;
8555080332cSMichael Neuling 	case 2: /* lxvd2x */
8565080332cSMichael Neuling 		for (i = 0; i < 2; i++)
8575080332cSMichael Neuling 			((u64 *)vdst)[i] = ((u64 *)vbuf)[1-i];
8585080332cSMichael Neuling 		break;
8595080332cSMichael Neuling 	case 3: /* lxvb16x */
8605080332cSMichael Neuling 		for (i = 0; i < 16; i++)
8615080332cSMichael Neuling 			vdst[i] = vbuf[15-i];
8625080332cSMichael Neuling 		break;
8635080332cSMichael Neuling 	}
8645080332cSMichael Neuling #else /* __LITTLE_ENDIAN__ */
8655080332cSMichael Neuling 	/* On a big endian kernel, a BE userspace only needs a memcpy */
8665080332cSMichael Neuling 	if (!swap)
8675080332cSMichael Neuling 		sel = 3;
8685080332cSMichael Neuling 
8695080332cSMichael Neuling 	/* Otherwise, we need to swap the content of the components */
8705080332cSMichael Neuling 	switch (sel) {
8715080332cSMichael Neuling 	case 0:	/* lxvw4x */
8725080332cSMichael Neuling 		for (i = 0; i < 4; i++)
8735080332cSMichael Neuling 			((u32 *)vdst)[i] = cpu_to_le32(((u32 *)vbuf)[i]);
8745080332cSMichael Neuling 		break;
8755080332cSMichael Neuling 	case 1: /* lxvh8x */
8765080332cSMichael Neuling 		for (i = 0; i < 8; i++)
8775080332cSMichael Neuling 			((u16 *)vdst)[i] = cpu_to_le16(((u16 *)vbuf)[i]);
8785080332cSMichael Neuling 		break;
8795080332cSMichael Neuling 	case 2: /* lxvd2x */
8805080332cSMichael Neuling 		for (i = 0; i < 2; i++)
8815080332cSMichael Neuling 			((u64 *)vdst)[i] = cpu_to_le64(((u64 *)vbuf)[i]);
8825080332cSMichael Neuling 		break;
8835080332cSMichael Neuling 	case 3: /* lxvb16x */
8845080332cSMichael Neuling 		memcpy(vdst, vbuf, 16);
8855080332cSMichael Neuling 		break;
8865080332cSMichael Neuling 	}
8875080332cSMichael Neuling #endif /* !__LITTLE_ENDIAN__ */
8885080332cSMichael Neuling 
8895080332cSMichael Neuling 	/* Go to next instruction */
8905080332cSMichael Neuling 	regs->nip += 4;
8915080332cSMichael Neuling }
8925080332cSMichael Neuling #endif /* CONFIG_VSX */
8935080332cSMichael Neuling 
8940869b6fdSMahesh Salgaonkar void handle_hmi_exception(struct pt_regs *regs)
8950869b6fdSMahesh Salgaonkar {
8960869b6fdSMahesh Salgaonkar 	struct pt_regs *old_regs;
8970869b6fdSMahesh Salgaonkar 
8980869b6fdSMahesh Salgaonkar 	old_regs = set_irq_regs(regs);
8990869b6fdSMahesh Salgaonkar 	irq_enter();
9000869b6fdSMahesh Salgaonkar 
9015080332cSMichael Neuling #ifdef CONFIG_VSX
9025080332cSMichael Neuling 	/* Real mode flagged P9 special emu is needed */
9035080332cSMichael Neuling 	if (local_paca->hmi_p9_special_emu) {
9045080332cSMichael Neuling 		local_paca->hmi_p9_special_emu = 0;
9055080332cSMichael Neuling 
9065080332cSMichael Neuling 		/*
9075080332cSMichael Neuling 		 * We don't want to take page faults while doing the
9085080332cSMichael Neuling 		 * emulation, we just replay the instruction if necessary.
9095080332cSMichael Neuling 		 */
9105080332cSMichael Neuling 		pagefault_disable();
9115080332cSMichael Neuling 		p9_hmi_special_emu(regs);
9125080332cSMichael Neuling 		pagefault_enable();
9135080332cSMichael Neuling 	}
9145080332cSMichael Neuling #endif /* CONFIG_VSX */
9155080332cSMichael Neuling 
9160869b6fdSMahesh Salgaonkar 	if (ppc_md.handle_hmi_exception)
9170869b6fdSMahesh Salgaonkar 		ppc_md.handle_hmi_exception(regs);
9180869b6fdSMahesh Salgaonkar 
9190869b6fdSMahesh Salgaonkar 	irq_exit();
9200869b6fdSMahesh Salgaonkar 	set_irq_regs(old_regs);
9210869b6fdSMahesh Salgaonkar }
9220869b6fdSMahesh Salgaonkar 
923dc1c1ca3SStephen Rothwell void unknown_exception(struct pt_regs *regs)
92414cf11afSPaul Mackerras {
925ba12eedeSLi Zhong 	enum ctx_state prev_state = exception_enter();
926ba12eedeSLi Zhong 
92714cf11afSPaul Mackerras 	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
92814cf11afSPaul Mackerras 	       regs->nip, regs->msr, regs->trap);
92914cf11afSPaul Mackerras 
93014cf11afSPaul Mackerras 	_exception(SIGTRAP, regs, 0, 0);
931ba12eedeSLi Zhong 
932ba12eedeSLi Zhong 	exception_exit(prev_state);
93314cf11afSPaul Mackerras }
93414cf11afSPaul Mackerras 
935dc1c1ca3SStephen Rothwell void instruction_breakpoint_exception(struct pt_regs *regs)
93614cf11afSPaul Mackerras {
937ba12eedeSLi Zhong 	enum ctx_state prev_state = exception_enter();
938ba12eedeSLi Zhong 
93914cf11afSPaul Mackerras 	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
94014cf11afSPaul Mackerras 					5, SIGTRAP) == NOTIFY_STOP)
941ba12eedeSLi Zhong 		goto bail;
94214cf11afSPaul Mackerras 	if (debugger_iabr_match(regs))
943ba12eedeSLi Zhong 		goto bail;
94414cf11afSPaul Mackerras 	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
945ba12eedeSLi Zhong 
946ba12eedeSLi Zhong bail:
947ba12eedeSLi Zhong 	exception_exit(prev_state);
94814cf11afSPaul Mackerras }
94914cf11afSPaul Mackerras 
95014cf11afSPaul Mackerras void RunModeException(struct pt_regs *regs)
95114cf11afSPaul Mackerras {
95214cf11afSPaul Mackerras 	_exception(SIGTRAP, regs, 0, 0);
95314cf11afSPaul Mackerras }
95414cf11afSPaul Mackerras 
95503465f89SNicholas Piggin void single_step_exception(struct pt_regs *regs)
95614cf11afSPaul Mackerras {
957ba12eedeSLi Zhong 	enum ctx_state prev_state = exception_enter();
958ba12eedeSLi Zhong 
9592538c2d0SK.Prasad 	clear_single_step(regs);
96014cf11afSPaul Mackerras 
9616cc89badSNaveen N. Rao 	if (kprobe_post_handler(regs))
9626cc89badSNaveen N. Rao 		return;
9636cc89badSNaveen N. Rao 
96414cf11afSPaul Mackerras 	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
96514cf11afSPaul Mackerras 					5, SIGTRAP) == NOTIFY_STOP)
966ba12eedeSLi Zhong 		goto bail;
96714cf11afSPaul Mackerras 	if (debugger_sstep(regs))
968ba12eedeSLi Zhong 		goto bail;
96914cf11afSPaul Mackerras 
97014cf11afSPaul Mackerras 	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
971ba12eedeSLi Zhong 
972ba12eedeSLi Zhong bail:
973ba12eedeSLi Zhong 	exception_exit(prev_state);
97414cf11afSPaul Mackerras }
97503465f89SNicholas Piggin NOKPROBE_SYMBOL(single_step_exception);
97614cf11afSPaul Mackerras 
97714cf11afSPaul Mackerras /*
97814cf11afSPaul Mackerras  * After we have successfully emulated an instruction, we have to
97914cf11afSPaul Mackerras  * check if the instruction was being single-stepped, and if so,
98014cf11afSPaul Mackerras  * pretend we got a single-step exception.  This was pointed out
98114cf11afSPaul Mackerras  * by Kumar Gala.  -- paulus
98214cf11afSPaul Mackerras  */
9838dad3f92SPaul Mackerras static void emulate_single_step(struct pt_regs *regs)
98414cf11afSPaul Mackerras {
9852538c2d0SK.Prasad 	if (single_stepping(regs))
9862538c2d0SK.Prasad 		single_step_exception(regs);
98714cf11afSPaul Mackerras }
98814cf11afSPaul Mackerras 
9895fad293bSKumar Gala static inline int __parse_fpscr(unsigned long fpscr)
990dc1c1ca3SStephen Rothwell {
9915fad293bSKumar Gala 	int ret = 0;
992dc1c1ca3SStephen Rothwell 
993dc1c1ca3SStephen Rothwell 	/* Invalid operation */
994dc1c1ca3SStephen Rothwell 	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
9955fad293bSKumar Gala 		ret = FPE_FLTINV;
996dc1c1ca3SStephen Rothwell 
997dc1c1ca3SStephen Rothwell 	/* Overflow */
998dc1c1ca3SStephen Rothwell 	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
9995fad293bSKumar Gala 		ret = FPE_FLTOVF;
1000dc1c1ca3SStephen Rothwell 
1001dc1c1ca3SStephen Rothwell 	/* Underflow */
1002dc1c1ca3SStephen Rothwell 	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
10035fad293bSKumar Gala 		ret = FPE_FLTUND;
1004dc1c1ca3SStephen Rothwell 
1005dc1c1ca3SStephen Rothwell 	/* Divide by zero */
1006dc1c1ca3SStephen Rothwell 	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
10075fad293bSKumar Gala 		ret = FPE_FLTDIV;
1008dc1c1ca3SStephen Rothwell 
1009dc1c1ca3SStephen Rothwell 	/* Inexact result */
1010dc1c1ca3SStephen Rothwell 	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
10115fad293bSKumar Gala 		ret = FPE_FLTRES;
10125fad293bSKumar Gala 
10135fad293bSKumar Gala 	return ret;
10145fad293bSKumar Gala }
10155fad293bSKumar Gala 
10165fad293bSKumar Gala static void parse_fpe(struct pt_regs *regs)
10175fad293bSKumar Gala {
10185fad293bSKumar Gala 	int code = 0;
10195fad293bSKumar Gala 
10205fad293bSKumar Gala 	flush_fp_to_thread(current);
10215fad293bSKumar Gala 
1022de79f7b9SPaul Mackerras 	code = __parse_fpscr(current->thread.fp_state.fpscr);
1023dc1c1ca3SStephen Rothwell 
1024dc1c1ca3SStephen Rothwell 	_exception(SIGFPE, regs, code, regs->nip);
1025dc1c1ca3SStephen Rothwell }
1026dc1c1ca3SStephen Rothwell 
1027dc1c1ca3SStephen Rothwell /*
1028dc1c1ca3SStephen Rothwell  * Illegal instruction emulation support.  Originally written to
102914cf11afSPaul Mackerras  * provide the PVR to user applications using the mfspr rd, PVR.
103014cf11afSPaul Mackerras  * Return non-zero if we can't emulate, or -EFAULT if the associated
103114cf11afSPaul Mackerras  * memory access caused an access fault.  Return zero on success.
103214cf11afSPaul Mackerras  *
103314cf11afSPaul Mackerras  * There are a couple of ways to do this, either "decode" the instruction
103414cf11afSPaul Mackerras  * or directly match lots of bits.  In this case, matching lots of
103514cf11afSPaul Mackerras  * bits is faster and easier.
103686417780SPaul Mackerras  *
103714cf11afSPaul Mackerras  */
103814cf11afSPaul Mackerras static int emulate_string_inst(struct pt_regs *regs, u32 instword)
103914cf11afSPaul Mackerras {
104014cf11afSPaul Mackerras 	u8 rT = (instword >> 21) & 0x1f;
104114cf11afSPaul Mackerras 	u8 rA = (instword >> 16) & 0x1f;
104214cf11afSPaul Mackerras 	u8 NB_RB = (instword >> 11) & 0x1f;
104314cf11afSPaul Mackerras 	u32 num_bytes;
104414cf11afSPaul Mackerras 	unsigned long EA;
104514cf11afSPaul Mackerras 	int pos = 0;
104614cf11afSPaul Mackerras 
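	/*
	 * Load/store string instructions move num_bytes bytes between memory
	 * at EA and successive GPRs starting at rT (wrapping from r31 back to
	 * r0), four bytes per register, most-significant byte first in the
	 * low word.  lswx/stswx take the byte count from the low 7 bits of
	 * XER, while lswi/stswi encode it in the NB field (0 meaning 32).
	 */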
104714cf11afSPaul Mackerras 	/* Early out if we are an invalid form of lswx */
104816c57b36SKumar Gala 	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
104914cf11afSPaul Mackerras 		if ((rT == rA) || (rT == NB_RB))
105014cf11afSPaul Mackerras 			return -EINVAL;
105114cf11afSPaul Mackerras 
105214cf11afSPaul Mackerras 	EA = (rA == 0) ? 0 : regs->gpr[rA];
105314cf11afSPaul Mackerras 
105416c57b36SKumar Gala 	switch (instword & PPC_INST_STRING_MASK) {
105516c57b36SKumar Gala 		case PPC_INST_LSWX:
105616c57b36SKumar Gala 		case PPC_INST_STSWX:
105714cf11afSPaul Mackerras 			EA += NB_RB;
105814cf11afSPaul Mackerras 			num_bytes = regs->xer & 0x7f;
105914cf11afSPaul Mackerras 			break;
106016c57b36SKumar Gala 		case PPC_INST_LSWI:
106116c57b36SKumar Gala 		case PPC_INST_STSWI:
106214cf11afSPaul Mackerras 			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
106314cf11afSPaul Mackerras 			break;
106414cf11afSPaul Mackerras 		default:
106514cf11afSPaul Mackerras 			return -EINVAL;
106614cf11afSPaul Mackerras 	}
106714cf11afSPaul Mackerras 
106814cf11afSPaul Mackerras 	while (num_bytes != 0)
106914cf11afSPaul Mackerras 	{
107014cf11afSPaul Mackerras 		u8 val;
107114cf11afSPaul Mackerras 		u32 shift = 8 * (3 - (pos & 0x3));
107214cf11afSPaul Mackerras 
107380aa0fb4SJames Yang 		/* if process is 32-bit, clear upper 32 bits of EA */
107480aa0fb4SJames Yang 		if ((regs->msr & MSR_64BIT) == 0)
107580aa0fb4SJames Yang 			EA &= 0xFFFFFFFF;
107680aa0fb4SJames Yang 
107716c57b36SKumar Gala 		switch ((instword & PPC_INST_STRING_MASK)) {
107816c57b36SKumar Gala 			case PPC_INST_LSWX:
107916c57b36SKumar Gala 			case PPC_INST_LSWI:
108014cf11afSPaul Mackerras 				if (get_user(val, (u8 __user *)EA))
108114cf11afSPaul Mackerras 					return -EFAULT;
108214cf11afSPaul Mackerras 				/* first time updating this reg,
108314cf11afSPaul Mackerras 				 * zero it out */
108414cf11afSPaul Mackerras 				if (pos == 0)
108514cf11afSPaul Mackerras 					regs->gpr[rT] = 0;
108614cf11afSPaul Mackerras 				regs->gpr[rT] |= val << shift;
108714cf11afSPaul Mackerras 				break;
108816c57b36SKumar Gala 			case PPC_INST_STSWI:
108916c57b36SKumar Gala 			case PPC_INST_STSWX:
109014cf11afSPaul Mackerras 				val = regs->gpr[rT] >> shift;
109114cf11afSPaul Mackerras 				if (put_user(val, (u8 __user *)EA))
109214cf11afSPaul Mackerras 					return -EFAULT;
109314cf11afSPaul Mackerras 				break;
109414cf11afSPaul Mackerras 		}
109514cf11afSPaul Mackerras 		/* move EA to next address */
109614cf11afSPaul Mackerras 		EA += 1;
109714cf11afSPaul Mackerras 		num_bytes--;
109814cf11afSPaul Mackerras 
109914cf11afSPaul Mackerras 		/* manage our position within the register */
110014cf11afSPaul Mackerras 		if (++pos == 4) {
110114cf11afSPaul Mackerras 			pos = 0;
110214cf11afSPaul Mackerras 			if (++rT == 32)
110314cf11afSPaul Mackerras 				rT = 0;
110414cf11afSPaul Mackerras 		}
110514cf11afSPaul Mackerras 	}
110614cf11afSPaul Mackerras 
110714cf11afSPaul Mackerras 	return 0;
110814cf11afSPaul Mackerras }
110914cf11afSPaul Mackerras 
1110c3412dcbSWill Schmidt static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
1111c3412dcbSWill Schmidt {
1112c3412dcbSWill Schmidt 	u32 ra,rs;
1113c3412dcbSWill Schmidt 	unsigned long tmp;
1114c3412dcbSWill Schmidt 
1115c3412dcbSWill Schmidt 	ra = (instword >> 16) & 0x1f;
1116c3412dcbSWill Schmidt 	rs = (instword >> 21) & 0x1f;
1117c3412dcbSWill Schmidt 
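	/*
	 * popcntb RA,RS stores, in each byte of RA, the number of 1 bits in
	 * the corresponding byte of RS.  The three steps below are the usual
	 * SWAR reduction: sum adjacent bits into 2-bit fields, then into
	 * 4-bit fields, then fold the two nibbles of every byte and mask
	 * with 0x0f so each byte holds its own count (0-8).  E.g. a source
	 * byte of 0xf3 produces a result byte of 0x06.
	 */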
1118c3412dcbSWill Schmidt 	tmp = regs->gpr[rs];
1119c3412dcbSWill Schmidt 	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
1120c3412dcbSWill Schmidt 	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
1121c3412dcbSWill Schmidt 	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
1122c3412dcbSWill Schmidt 	regs->gpr[ra] = tmp;
1123c3412dcbSWill Schmidt 
1124c3412dcbSWill Schmidt 	return 0;
1125c3412dcbSWill Schmidt }
1126c3412dcbSWill Schmidt 
1127c1469f13SKumar Gala static int emulate_isel(struct pt_regs *regs, u32 instword)
1128c1469f13SKumar Gala {
1129c1469f13SKumar Gala 	u8 rT = (instword >> 21) & 0x1f;
1130c1469f13SKumar Gala 	u8 rA = (instword >> 16) & 0x1f;
1131c1469f13SKumar Gala 	u8 rB = (instword >> 11) & 0x1f;
1132c1469f13SKumar Gala 	u8 BC = (instword >> 6) & 0x1f;
1133c1469f13SKumar Gala 	u8 bit;
1134c1469f13SKumar Gala 	unsigned long tmp;
1135c1469f13SKumar Gala 
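	/*
	 * isel RT,RA,RB,BC: if CR bit BC is set, RT gets (RA|0), otherwise
	 * RT gets (RB).  BC numbers bits from the most significant end of
	 * the CR, hence the (31 - BC) shift below.
	 */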
1136c1469f13SKumar Gala 	tmp = (rA == 0) ? 0 : regs->gpr[rA];
1137c1469f13SKumar Gala 	bit = (regs->ccr >> (31 - BC)) & 0x1;
1138c1469f13SKumar Gala 
1139c1469f13SKumar Gala 	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];
1140c1469f13SKumar Gala 
1141c1469f13SKumar Gala 	return 0;
1142c1469f13SKumar Gala }
1143c1469f13SKumar Gala 
11446ce6c629SMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
11456ce6c629SMichael Neuling static inline bool tm_abort_check(struct pt_regs *regs, int cause)
11466ce6c629SMichael Neuling {
11476ce6c629SMichael Neuling 	/* If we're emulating a load/store in an active transaction, we cannot
11486ce6c629SMichael Neuling 	 * emulate it as the kernel operates in transaction suspended context.
11496ce6c629SMichael Neuling 	 * We need to abort the transaction.  This creates a persistent TM
11506ce6c629SMichael Neuling 	 * abort so tell the user what caused it with a new code.
11516ce6c629SMichael Neuling 	 */
11526ce6c629SMichael Neuling 	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
11536ce6c629SMichael Neuling 		tm_enable();
11546ce6c629SMichael Neuling 		tm_abort(cause);
11556ce6c629SMichael Neuling 		return true;
11566ce6c629SMichael Neuling 	}
11576ce6c629SMichael Neuling 	return false;
11586ce6c629SMichael Neuling }
11596ce6c629SMichael Neuling #else
11606ce6c629SMichael Neuling static inline bool tm_abort_check(struct pt_regs *regs, int reason)
11616ce6c629SMichael Neuling {
11626ce6c629SMichael Neuling 	return false;
11636ce6c629SMichael Neuling }
11646ce6c629SMichael Neuling #endif
11656ce6c629SMichael Neuling 
116614cf11afSPaul Mackerras static int emulate_instruction(struct pt_regs *regs)
116714cf11afSPaul Mackerras {
116814cf11afSPaul Mackerras 	u32 instword;
116914cf11afSPaul Mackerras 	u32 rd;
117014cf11afSPaul Mackerras 
11714288e343SAnton Blanchard 	if (!user_mode(regs))
117214cf11afSPaul Mackerras 		return -EINVAL;
117314cf11afSPaul Mackerras 	CHECK_FULL_REGS(regs);
117414cf11afSPaul Mackerras 
117514cf11afSPaul Mackerras 	if (get_user(instword, (u32 __user *)(regs->nip)))
117614cf11afSPaul Mackerras 		return -EFAULT;
117714cf11afSPaul Mackerras 
117814cf11afSPaul Mackerras 	/* Emulate the mfspr rD, PVR. */
117916c57b36SKumar Gala 	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
1180eecff81dSAnton Blanchard 		PPC_WARN_EMULATED(mfpvr, regs);
118114cf11afSPaul Mackerras 		rd = (instword >> 21) & 0x1f;
118214cf11afSPaul Mackerras 		regs->gpr[rd] = mfspr(SPRN_PVR);
118314cf11afSPaul Mackerras 		return 0;
118414cf11afSPaul Mackerras 	}
118514cf11afSPaul Mackerras 
118614cf11afSPaul Mackerras 	/* Emulating the dcba insn is just a no-op.  */
118780947e7cSGeert Uytterhoeven 	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
1188eecff81dSAnton Blanchard 		PPC_WARN_EMULATED(dcba, regs);
118914cf11afSPaul Mackerras 		return 0;
119080947e7cSGeert Uytterhoeven 	}
119114cf11afSPaul Mackerras 
119214cf11afSPaul Mackerras 	/* Emulate the mcrxr insn.  */
119316c57b36SKumar Gala 	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
119486417780SPaul Mackerras 		int shift = (instword >> 21) & 0x1c;
119514cf11afSPaul Mackerras 		unsigned long msk = 0xf0000000UL >> shift;
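		/*
		 * mcrxr BF copies the SO/OV/CA bits (the high nibble of the
		 * XER image) into CR field BF and then clears them in the
		 * XER; shift is 4*BF, so msk selects the destination field.
		 */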
119614cf11afSPaul Mackerras 
1197eecff81dSAnton Blanchard 		PPC_WARN_EMULATED(mcrxr, regs);
119814cf11afSPaul Mackerras 		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
119914cf11afSPaul Mackerras 		regs->xer &= ~0xf0000000UL;
120014cf11afSPaul Mackerras 		return 0;
120114cf11afSPaul Mackerras 	}
120214cf11afSPaul Mackerras 
120314cf11afSPaul Mackerras 	/* Emulate load/store string insn. */
120480947e7cSGeert Uytterhoeven 	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
12056ce6c629SMichael Neuling 		if (tm_abort_check(regs,
12066ce6c629SMichael Neuling 				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
12076ce6c629SMichael Neuling 			return -EINVAL;
1208eecff81dSAnton Blanchard 		PPC_WARN_EMULATED(string, regs);
120914cf11afSPaul Mackerras 		return emulate_string_inst(regs, instword);
121080947e7cSGeert Uytterhoeven 	}
121114cf11afSPaul Mackerras 
1212c3412dcbSWill Schmidt 	/* Emulate the popcntb (Population Count Bytes) instruction. */
121316c57b36SKumar Gala 	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
1214eecff81dSAnton Blanchard 		PPC_WARN_EMULATED(popcntb, regs);
1215c3412dcbSWill Schmidt 		return emulate_popcntb_inst(regs, instword);
1216c3412dcbSWill Schmidt 	}
1217c3412dcbSWill Schmidt 
1218c1469f13SKumar Gala 	/* Emulate isel (Integer Select) instruction */
121916c57b36SKumar Gala 	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
1220eecff81dSAnton Blanchard 		PPC_WARN_EMULATED(isel, regs);
1221c1469f13SKumar Gala 		return emulate_isel(regs, instword);
1222c1469f13SKumar Gala 	}
1223c1469f13SKumar Gala 
12249863c28aSJames Yang 	/* Emulate sync instruction variants */
12259863c28aSJames Yang 	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
12269863c28aSJames Yang 		PPC_WARN_EMULATED(sync, regs);
12279863c28aSJames Yang 		asm volatile("sync");
12289863c28aSJames Yang 		return 0;
12299863c28aSJames Yang 	}
12309863c28aSJames Yang 
1231efcac658SAlexey Kardashevskiy #ifdef CONFIG_PPC64
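	/*
	 * The DSCR is reachable via two SPR numbers: 0x03 (user) and 0x11
	 * (privileged).  Both encodings are matched below.
	 */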
1232efcac658SAlexey Kardashevskiy 	/* Emulate the mfspr rD, DSCR. */
123373d2fb75SAnton Blanchard 	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
123473d2fb75SAnton Blanchard 		PPC_INST_MFSPR_DSCR_USER) ||
123573d2fb75SAnton Blanchard 	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
123673d2fb75SAnton Blanchard 		PPC_INST_MFSPR_DSCR)) &&
1237efcac658SAlexey Kardashevskiy 			cpu_has_feature(CPU_FTR_DSCR)) {
1238efcac658SAlexey Kardashevskiy 		PPC_WARN_EMULATED(mfdscr, regs);
1239efcac658SAlexey Kardashevskiy 		rd = (instword >> 21) & 0x1f;
1240efcac658SAlexey Kardashevskiy 		regs->gpr[rd] = mfspr(SPRN_DSCR);
1241efcac658SAlexey Kardashevskiy 		return 0;
1242efcac658SAlexey Kardashevskiy 	}
1243efcac658SAlexey Kardashevskiy 	/* Emulate the mtspr DSCR, rD. */
124473d2fb75SAnton Blanchard 	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
124573d2fb75SAnton Blanchard 		PPC_INST_MTSPR_DSCR_USER) ||
124673d2fb75SAnton Blanchard 	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
124773d2fb75SAnton Blanchard 		PPC_INST_MTSPR_DSCR)) &&
1248efcac658SAlexey Kardashevskiy 			cpu_has_feature(CPU_FTR_DSCR)) {
1249efcac658SAlexey Kardashevskiy 		PPC_WARN_EMULATED(mtdscr, regs);
1250efcac658SAlexey Kardashevskiy 		rd = (instword >> 21) & 0x1f;
125100ca0de0SAnton Blanchard 		current->thread.dscr = regs->gpr[rd];
1252efcac658SAlexey Kardashevskiy 		current->thread.dscr_inherit = 1;
125300ca0de0SAnton Blanchard 		mtspr(SPRN_DSCR, current->thread.dscr);
1254efcac658SAlexey Kardashevskiy 		return 0;
1255efcac658SAlexey Kardashevskiy 	}
1256efcac658SAlexey Kardashevskiy #endif
1257efcac658SAlexey Kardashevskiy 
125814cf11afSPaul Mackerras 	return -EINVAL;
125914cf11afSPaul Mackerras }
126014cf11afSPaul Mackerras 
126173c9ceabSJeremy Fitzhardinge int is_valid_bugaddr(unsigned long addr)
126214cf11afSPaul Mackerras {
126373c9ceabSJeremy Fitzhardinge 	return is_kernel_addr(addr);
126414cf11afSPaul Mackerras }
126514cf11afSPaul Mackerras 
12663a3b5aa6SKevin Hao #ifdef CONFIG_MATH_EMULATION
12673a3b5aa6SKevin Hao static int emulate_math(struct pt_regs *regs)
12683a3b5aa6SKevin Hao {
12693a3b5aa6SKevin Hao 	int ret;
12703a3b5aa6SKevin Hao 	extern int do_mathemu(struct pt_regs *regs);
12713a3b5aa6SKevin Hao 
12723a3b5aa6SKevin Hao 	ret = do_mathemu(regs);
12733a3b5aa6SKevin Hao 	if (ret >= 0)
12743a3b5aa6SKevin Hao 		PPC_WARN_EMULATED(math, regs);
12753a3b5aa6SKevin Hao 
12763a3b5aa6SKevin Hao 	switch (ret) {
12773a3b5aa6SKevin Hao 	case 0:
12783a3b5aa6SKevin Hao 		emulate_single_step(regs);
12793a3b5aa6SKevin Hao 		return 0;
12803a3b5aa6SKevin Hao 	case 1: {
12813a3b5aa6SKevin Hao 			int code = 0;
1282de79f7b9SPaul Mackerras 			code = __parse_fpscr(current->thread.fp_state.fpscr);
12833a3b5aa6SKevin Hao 			_exception(SIGFPE, regs, code, regs->nip);
12843a3b5aa6SKevin Hao 			return 0;
12853a3b5aa6SKevin Hao 		}
12863a3b5aa6SKevin Hao 	case -EFAULT:
12873a3b5aa6SKevin Hao 		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
12883a3b5aa6SKevin Hao 		return 0;
12893a3b5aa6SKevin Hao 	}
12903a3b5aa6SKevin Hao 
12913a3b5aa6SKevin Hao 	return -1;
12923a3b5aa6SKevin Hao }
12933a3b5aa6SKevin Hao #else
12943a3b5aa6SKevin Hao static inline int emulate_math(struct pt_regs *regs) { return -1; }
12953a3b5aa6SKevin Hao #endif
12963a3b5aa6SKevin Hao 
129703465f89SNicholas Piggin void program_check_exception(struct pt_regs *regs)
129814cf11afSPaul Mackerras {
1299ba12eedeSLi Zhong 	enum ctx_state prev_state = exception_enter();
130014cf11afSPaul Mackerras 	unsigned int reason = get_reason(regs);
130114cf11afSPaul Mackerras 
1302aa42c69cSKim Phillips 	/* We can now get here via an FP Unavailable exception if the core
130304903a30SKumar Gala 	 * has no FPU; in that case the reason flags will be 0 */
130414cf11afSPaul Mackerras 
130514cf11afSPaul Mackerras 	if (reason & REASON_FP) {
130614cf11afSPaul Mackerras 		/* IEEE FP exception */
1307dc1c1ca3SStephen Rothwell 		parse_fpe(regs);
1308ba12eedeSLi Zhong 		goto bail;
13098dad3f92SPaul Mackerras 	}
13108dad3f92SPaul Mackerras 	if (reason & REASON_TRAP) {
1311a4c3f909SBalbir Singh 		unsigned long bugaddr;
1312ba797b28SJason Wessel 		/* Debugger is first in line to stop recursive faults in
1313ba797b28SJason Wessel 		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
1314ba797b28SJason Wessel 		if (debugger_bpt(regs))
1315ba12eedeSLi Zhong 			goto bail;
1316ba797b28SJason Wessel 
13176cc89badSNaveen N. Rao 		if (kprobe_handler(regs))
13186cc89badSNaveen N. Rao 			goto bail;
13196cc89badSNaveen N. Rao 
132014cf11afSPaul Mackerras 		/* trap exception */
1321dc1c1ca3SStephen Rothwell 		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
1322dc1c1ca3SStephen Rothwell 				== NOTIFY_STOP)
1323ba12eedeSLi Zhong 			goto bail;
132473c9ceabSJeremy Fitzhardinge 
1325a4c3f909SBalbir Singh 		bugaddr = regs->nip;
1326a4c3f909SBalbir Singh 		/*
1327a4c3f909SBalbir Singh 		 * Fixup bugaddr for BUG_ON() in real mode
1328a4c3f909SBalbir Singh 		 */
1329a4c3f909SBalbir Singh 		if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
1330a4c3f909SBalbir Singh 			bugaddr += PAGE_OFFSET;
1331a4c3f909SBalbir Singh 
133273c9ceabSJeremy Fitzhardinge 		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
1333a4c3f909SBalbir Singh 		    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
133414cf11afSPaul Mackerras 			regs->nip += 4;
1335ba12eedeSLi Zhong 			goto bail;
133614cf11afSPaul Mackerras 		}
13378dad3f92SPaul Mackerras 		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
1338ba12eedeSLi Zhong 		goto bail;
13398dad3f92SPaul Mackerras 	}
1340bc2a9408SMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1341bc2a9408SMichael Neuling 	if (reason & REASON_TM) {
1342bc2a9408SMichael Neuling 		/* This is a TM "Bad Thing Exception" program check.
1343bc2a9408SMichael Neuling 		 * This occurs when:
1344bc2a9408SMichael Neuling 		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
1345bc2a9408SMichael Neuling 		 *    transition in TM states.
1346bc2a9408SMichael Neuling 		 * -  A trechkpt is attempted when transactional.
1347bc2a9408SMichael Neuling 		 * -  A treclaim is attempted when non transactional.
1348bc2a9408SMichael Neuling 		 * -  A tend is illegally attempted.
1349bc2a9408SMichael Neuling 		 * -  writing a TM SPR when transactional.
1350632f0574SMichael Ellerman 		 *
1351632f0574SMichael Ellerman 		 * If usermode caused this, it's done something illegal and
1352bc2a9408SMichael Neuling 		 * gets a SIGILL slap on the wrist.  We call it an illegal
1353bc2a9408SMichael Neuling 		 * operand to distinguish from the instruction just being bad
1354bc2a9408SMichael Neuling 		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
1355bc2a9408SMichael Neuling 		 * illegal /placement/ of a valid instruction.
1356bc2a9408SMichael Neuling 		 */
1357bc2a9408SMichael Neuling 		if (user_mode(regs)) {
1358bc2a9408SMichael Neuling 			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
1359ba12eedeSLi Zhong 			goto bail;
1360bc2a9408SMichael Neuling 		} else {
1361bc2a9408SMichael Neuling 			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
1362bc2a9408SMichael Neuling 			       "at %lx (msr 0x%lx)\n", regs->nip, regs->msr);
1363bc2a9408SMichael Neuling 			die("Unrecoverable exception", regs, SIGABRT);
1364bc2a9408SMichael Neuling 		}
1365bc2a9408SMichael Neuling 	}
1366bc2a9408SMichael Neuling #endif
13678dad3f92SPaul Mackerras 
1368b3f6a459SMichael Ellerman 	/*
1369b3f6a459SMichael Ellerman 	 * If we took the program check in the kernel skip down to sending a
1370b3f6a459SMichael Ellerman 	 * SIGILL. The subsequent cases all relate to emulating instructions
1371b3f6a459SMichael Ellerman 	 * which we should only do for userspace. We also do not want to enable
1372b3f6a459SMichael Ellerman 	 * interrupts for kernel faults because that might lead to further
1373b3f6a459SMichael Ellerman 	 * faults, and lose the context of the original exception.
1374b3f6a459SMichael Ellerman 	 */
1375b3f6a459SMichael Ellerman 	if (!user_mode(regs))
1376b3f6a459SMichael Ellerman 		goto sigill;
1377b3f6a459SMichael Ellerman 
1378a3512b2dSBenjamin Herrenschmidt 	/* We restore the interrupt state now */
1379a3512b2dSBenjamin Herrenschmidt 	if (!arch_irq_disabled_regs(regs))
1380cd8a5673SPaul Mackerras 		local_irq_enable();
1381cd8a5673SPaul Mackerras 
138204903a30SKumar Gala 	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
138304903a30SKumar Gala 	 * but there seems to be a hardware bug on the 405GP (RevD)
138404903a30SKumar Gala 	 * that means ESR is sometimes set incorrectly - either to
138504903a30SKumar Gala 	 * ESR_DST (!?) or 0.  In the process of chasing this with the
138604903a30SKumar Gala 	 * hardware people - not sure if it can happen on any illegal
138704903a30SKumar Gala 	 * instruction or only on FP instructions, whether there is a
13884e63f8edSBenjamin Herrenschmidt 	 * pattern to occurrences etc. -dgibson 31/Mar/2003
13894e63f8edSBenjamin Herrenschmidt 	 */
13903a3b5aa6SKevin Hao 	if (!emulate_math(regs))
1391ba12eedeSLi Zhong 		goto bail;
139204903a30SKumar Gala 
13938dad3f92SPaul Mackerras 	/* Try to emulate it if we should. */
13948dad3f92SPaul Mackerras 	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
139514cf11afSPaul Mackerras 		switch (emulate_instruction(regs)) {
139614cf11afSPaul Mackerras 		case 0:
139714cf11afSPaul Mackerras 			regs->nip += 4;
139814cf11afSPaul Mackerras 			emulate_single_step(regs);
1399ba12eedeSLi Zhong 			goto bail;
140014cf11afSPaul Mackerras 		case -EFAULT:
140114cf11afSPaul Mackerras 			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1402ba12eedeSLi Zhong 			goto bail;
14038dad3f92SPaul Mackerras 		}
14048dad3f92SPaul Mackerras 	}
14058dad3f92SPaul Mackerras 
1406b3f6a459SMichael Ellerman sigill:
140714cf11afSPaul Mackerras 	if (reason & REASON_PRIVILEGED)
140814cf11afSPaul Mackerras 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
140914cf11afSPaul Mackerras 	else
141014cf11afSPaul Mackerras 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1411ba12eedeSLi Zhong 
1412ba12eedeSLi Zhong bail:
1413ba12eedeSLi Zhong 	exception_exit(prev_state);
141414cf11afSPaul Mackerras }
141503465f89SNicholas Piggin NOKPROBE_SYMBOL(program_check_exception);
141614cf11afSPaul Mackerras 
1417bf593907SPaul Mackerras /*
1418bf593907SPaul Mackerras  * This occurs when running in hypervisor mode on POWER6 or later
1419bf593907SPaul Mackerras  * and an illegal instruction is encountered.
1420bf593907SPaul Mackerras  */
142103465f89SNicholas Piggin void emulation_assist_interrupt(struct pt_regs *regs)
1422bf593907SPaul Mackerras {
1423bf593907SPaul Mackerras 	regs->msr |= REASON_ILLEGAL;
1424bf593907SPaul Mackerras 	program_check_exception(regs);
1425bf593907SPaul Mackerras }
142603465f89SNicholas Piggin NOKPROBE_SYMBOL(emulation_assist_interrupt);
1427bf593907SPaul Mackerras 
1428dc1c1ca3SStephen Rothwell void alignment_exception(struct pt_regs *regs)
142914cf11afSPaul Mackerras {
1430ba12eedeSLi Zhong 	enum ctx_state prev_state = exception_enter();
14314393c4f6SBenjamin Herrenschmidt 	int sig, code, fixed = 0;
143214cf11afSPaul Mackerras 
1433a3512b2dSBenjamin Herrenschmidt 	/* We restore the interrupt state now */
1434a3512b2dSBenjamin Herrenschmidt 	if (!arch_irq_disabled_regs(regs))
1435a3512b2dSBenjamin Herrenschmidt 		local_irq_enable();
1436a3512b2dSBenjamin Herrenschmidt 
14376ce6c629SMichael Neuling 	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
14386ce6c629SMichael Neuling 		goto bail;
14396ce6c629SMichael Neuling 
1440e9370ae1SPaul Mackerras 	/* we don't implement logging of alignment exceptions */
1441e9370ae1SPaul Mackerras 	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
144214cf11afSPaul Mackerras 		fixed = fix_alignment(regs);
144314cf11afSPaul Mackerras 
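	/*
	 * fix_alignment() is expected to return 1 once it has emulated the
	 * access, -EFAULT when the operand address was inaccessible, and
	 * anything else when the access could not be emulated.
	 */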
144414cf11afSPaul Mackerras 	if (fixed == 1) {
144514cf11afSPaul Mackerras 		regs->nip += 4;	/* skip over emulated instruction */
144614cf11afSPaul Mackerras 		emulate_single_step(regs);
1447ba12eedeSLi Zhong 		goto bail;
144814cf11afSPaul Mackerras 	}
144914cf11afSPaul Mackerras 
145014cf11afSPaul Mackerras 	/* Operand address was bad */
145114cf11afSPaul Mackerras 	if (fixed == -EFAULT) {
14524393c4f6SBenjamin Herrenschmidt 		sig = SIGSEGV;
14534393c4f6SBenjamin Herrenschmidt 		code = SEGV_ACCERR;
14544393c4f6SBenjamin Herrenschmidt 	} else {
14554393c4f6SBenjamin Herrenschmidt 		sig = SIGBUS;
14564393c4f6SBenjamin Herrenschmidt 		code = BUS_ADRALN;
145714cf11afSPaul Mackerras 	}
14584393c4f6SBenjamin Herrenschmidt 	if (user_mode(regs))
14594393c4f6SBenjamin Herrenschmidt 		_exception(sig, regs, code, regs->dar);
14604393c4f6SBenjamin Herrenschmidt 	else
14614393c4f6SBenjamin Herrenschmidt 		bad_page_fault(regs, regs->dar, sig);
1462ba12eedeSLi Zhong 
1463ba12eedeSLi Zhong bail:
1464ba12eedeSLi Zhong 	exception_exit(prev_state);
146514cf11afSPaul Mackerras }
146614cf11afSPaul Mackerras 
1467f0f558b1SPaul Mackerras void slb_miss_bad_addr(struct pt_regs *regs)
1468f0f558b1SPaul Mackerras {
1469f0f558b1SPaul Mackerras 	enum ctx_state prev_state = exception_enter();
1470f0f558b1SPaul Mackerras 
1471f0f558b1SPaul Mackerras 	if (user_mode(regs))
1472f0f558b1SPaul Mackerras 		_exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
1473f0f558b1SPaul Mackerras 	else
1474f0f558b1SPaul Mackerras 		bad_page_fault(regs, regs->dar, SIGSEGV);
1475f0f558b1SPaul Mackerras 
1476f0f558b1SPaul Mackerras 	exception_exit(prev_state);
1477f0f558b1SPaul Mackerras }
1478f0f558b1SPaul Mackerras 
147914cf11afSPaul Mackerras void StackOverflow(struct pt_regs *regs)
148014cf11afSPaul Mackerras {
148114cf11afSPaul Mackerras 	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
148214cf11afSPaul Mackerras 	       current, regs->gpr[1]);
148314cf11afSPaul Mackerras 	debugger(regs);
148414cf11afSPaul Mackerras 	show_regs(regs);
148514cf11afSPaul Mackerras 	panic("kernel stack overflow");
148614cf11afSPaul Mackerras }
148714cf11afSPaul Mackerras 
148814cf11afSPaul Mackerras void nonrecoverable_exception(struct pt_regs *regs)
148914cf11afSPaul Mackerras {
149014cf11afSPaul Mackerras 	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
149114cf11afSPaul Mackerras 	       regs->nip, regs->msr);
149214cf11afSPaul Mackerras 	debugger(regs);
149314cf11afSPaul Mackerras 	die("nonrecoverable exception", regs, SIGKILL);
149414cf11afSPaul Mackerras }
149514cf11afSPaul Mackerras 
1496dc1c1ca3SStephen Rothwell void kernel_fp_unavailable_exception(struct pt_regs *regs)
1497dc1c1ca3SStephen Rothwell {
1498ba12eedeSLi Zhong 	enum ctx_state prev_state = exception_enter();
1499ba12eedeSLi Zhong 
1500dc1c1ca3SStephen Rothwell 	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
1501dc1c1ca3SStephen Rothwell 			  "%lx at %lx\n", regs->trap, regs->nip);
1502dc1c1ca3SStephen Rothwell 	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
1503ba12eedeSLi Zhong 
1504ba12eedeSLi Zhong 	exception_exit(prev_state);
1505dc1c1ca3SStephen Rothwell }
1506dc1c1ca3SStephen Rothwell 
1507dc1c1ca3SStephen Rothwell void altivec_unavailable_exception(struct pt_regs *regs)
1508dc1c1ca3SStephen Rothwell {
1509ba12eedeSLi Zhong 	enum ctx_state prev_state = exception_enter();
1510ba12eedeSLi Zhong 
1511dc1c1ca3SStephen Rothwell 	if (user_mode(regs)) {
1512dc1c1ca3SStephen Rothwell 		/* A user program has executed an altivec instruction,
1513dc1c1ca3SStephen Rothwell 		   but this kernel doesn't support altivec. */
1514dc1c1ca3SStephen Rothwell 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1515ba12eedeSLi Zhong 		goto bail;
1516dc1c1ca3SStephen Rothwell 	}
15176c4841c2SAnton Blanchard 
1518dc1c1ca3SStephen Rothwell 	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
1519dc1c1ca3SStephen Rothwell 			"%lx at %lx\n", regs->trap, regs->nip);
1520dc1c1ca3SStephen Rothwell 	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
1521ba12eedeSLi Zhong 
1522ba12eedeSLi Zhong bail:
1523ba12eedeSLi Zhong 	exception_exit(prev_state);
1524dc1c1ca3SStephen Rothwell }
1525dc1c1ca3SStephen Rothwell 
1526ce48b210SMichael Neuling void vsx_unavailable_exception(struct pt_regs *regs)
1527ce48b210SMichael Neuling {
1528ce48b210SMichael Neuling 	if (user_mode(regs)) {
1529ce48b210SMichael Neuling 		/* A user program has executed a vsx instruction,
1530ce48b210SMichael Neuling 		   but this kernel doesn't support vsx. */
1531ce48b210SMichael Neuling 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1532ce48b210SMichael Neuling 		return;
1533ce48b210SMichael Neuling 	}
1534ce48b210SMichael Neuling 
1535ce48b210SMichael Neuling 	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
1536ce48b210SMichael Neuling 			"%lx at %lx\n", regs->trap, regs->nip);
1537ce48b210SMichael Neuling 	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
1538ce48b210SMichael Neuling }
1539ce48b210SMichael Neuling 
15402517617eSMichael Neuling #ifdef CONFIG_PPC64
1541172f7aaaSCyril Bur static void tm_unavailable(struct pt_regs *regs)
1542172f7aaaSCyril Bur {
15435d176f75SCyril Bur #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
15445d176f75SCyril Bur 	if (user_mode(regs)) {
15455d176f75SCyril Bur 		current->thread.load_tm++;
15465d176f75SCyril Bur 		regs->msr |= MSR_TM;
15475d176f75SCyril Bur 		tm_enable();
15485d176f75SCyril Bur 		tm_restore_sprs(&current->thread);
15495d176f75SCyril Bur 		return;
15505d176f75SCyril Bur 	}
15515d176f75SCyril Bur #endif
1552172f7aaaSCyril Bur 	pr_emerg("Unrecoverable TM Unavailable Exception "
1553172f7aaaSCyril Bur 			"%lx at %lx\n", regs->trap, regs->nip);
1554172f7aaaSCyril Bur 	die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
1555172f7aaaSCyril Bur }
1556172f7aaaSCyril Bur 
1557021424a1SMichael Ellerman void facility_unavailable_exception(struct pt_regs *regs)
1558d0c0c9a1SMichael Neuling {
1559021424a1SMichael Ellerman 	static char *facility_strings[] = {
15602517617eSMichael Neuling 		[FSCR_FP_LG] = "FPU",
15612517617eSMichael Neuling 		[FSCR_VECVSX_LG] = "VMX/VSX",
15622517617eSMichael Neuling 		[FSCR_DSCR_LG] = "DSCR",
15632517617eSMichael Neuling 		[FSCR_PM_LG] = "PMU SPRs",
15642517617eSMichael Neuling 		[FSCR_BHRB_LG] = "BHRB",
15652517617eSMichael Neuling 		[FSCR_TM_LG] = "TM",
15662517617eSMichael Neuling 		[FSCR_EBB_LG] = "EBB",
15672517617eSMichael Neuling 		[FSCR_TAR_LG] = "TAR",
1568794464f4SNicholas Piggin 		[FSCR_MSGP_LG] = "MSGP",
15699b7ff0c6SNicholas Piggin 		[FSCR_SCV_LG] = "SCV",
1570021424a1SMichael Ellerman 	};
15712517617eSMichael Neuling 	char *facility = "unknown";
1572021424a1SMichael Ellerman 	u64 value;
1573c952c1c4SAnshuman Khandual 	u32 instword, rd;
15742517617eSMichael Neuling 	u8 status;
15752517617eSMichael Neuling 	bool hv;
1576021424a1SMichael Ellerman 
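	/*
	 * Vector 0xf80 is the Hypervisor Facility Unavailable interrupt
	 * (controlled by the HFSCR); 0xf60 is the ordinary Facility
	 * Unavailable interrupt (controlled by the FSCR).
	 */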
15772271db20SBenjamin Herrenschmidt 	hv = (TRAP(regs) == 0xf80);
15782517617eSMichael Neuling 	if (hv)
1579b14b6260SMichael Ellerman 		value = mfspr(SPRN_HFSCR);
15802517617eSMichael Neuling 	else
15812517617eSMichael Neuling 		value = mfspr(SPRN_FSCR);
15822517617eSMichael Neuling 
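	/*
	 * The top byte of the (H)FSCR identifies the facility that caused
	 * the interrupt; the values match the FSCR_*_LG bit numbers used
	 * to index facility_strings[].
	 */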
15832517617eSMichael Neuling 	status = value >> 56;
15842517617eSMichael Neuling 	if (status == FSCR_DSCR_LG) {
1585c952c1c4SAnshuman Khandual 		/*
1586c952c1c4SAnshuman Khandual 		 * User is accessing the DSCR register using the problem
1587c952c1c4SAnshuman Khandual 		 * state only SPR number (0x03) either through a mfspr or
1588c952c1c4SAnshuman Khandual 		 * a mtspr, then we set the inherit bit and the FSCR DSCR
1589c952c1c4SAnshuman Khandual 		 * bit, which also lets the user read or write the register
1590c952c1c4SAnshuman Khandual 		 * directly in the future. But in case it
1591c952c1c4SAnshuman Khandual 		 * future by setting via the FSCR DSCR bit. But in case it
1592c952c1c4SAnshuman Khandual 		 * is a read DSCR attempt through a mfspr instruction, we
1593c952c1c4SAnshuman Khandual 		 * just emulate the instruction instead. This code path will
1594c952c1c4SAnshuman Khandual 		 * always emulate all the mfspr instructions till the user
1595c952c1c4SAnshuman Khandual 		 * has attempted at least one mtspr instruction. This way it
1596c952c1c4SAnshuman Khandual 		 * preserves the same behaviour when the user is accessing
1597c952c1c4SAnshuman Khandual 		 * the DSCR through privilege level only SPR number (0x11)
1598c952c1c4SAnshuman Khandual 		 * which is emulated through illegal instruction exception.
1599c952c1c4SAnshuman Khandual 		 * We always leave HFSCR DSCR set.
16002517617eSMichael Neuling 		 */
1601c952c1c4SAnshuman Khandual 		if (get_user(instword, (u32 __user *)(regs->nip))) {
1602c952c1c4SAnshuman Khandual 			pr_err("Failed to fetch the user instruction\n");
1603c952c1c4SAnshuman Khandual 			return;
1604c952c1c4SAnshuman Khandual 		}
1605c952c1c4SAnshuman Khandual 
1606c952c1c4SAnshuman Khandual 		/* Write into DSCR (mtspr 0x03, RS) */
1607c952c1c4SAnshuman Khandual 		if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
1608c952c1c4SAnshuman Khandual 				== PPC_INST_MTSPR_DSCR_USER) {
1609c952c1c4SAnshuman Khandual 			rd = (instword >> 21) & 0x1f;
1610c952c1c4SAnshuman Khandual 			current->thread.dscr = regs->gpr[rd];
16112517617eSMichael Neuling 			current->thread.dscr_inherit = 1;
1612b57bd2deSMichael Neuling 			current->thread.fscr |= FSCR_DSCR;
1613b57bd2deSMichael Neuling 			mtspr(SPRN_FSCR, current->thread.fscr);
1614c952c1c4SAnshuman Khandual 		}
1615c952c1c4SAnshuman Khandual 
1616c952c1c4SAnshuman Khandual 		/* Read from DSCR (mfspr RT, 0x03) */
1617c952c1c4SAnshuman Khandual 		if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
1618c952c1c4SAnshuman Khandual 				== PPC_INST_MFSPR_DSCR_USER) {
1619c952c1c4SAnshuman Khandual 			if (emulate_instruction(regs)) {
1620c952c1c4SAnshuman Khandual 				pr_err("DSCR based mfspr emulation failed\n");
1621c952c1c4SAnshuman Khandual 				return;
1622c952c1c4SAnshuman Khandual 			}
1623c952c1c4SAnshuman Khandual 			regs->nip += 4;
1624c952c1c4SAnshuman Khandual 			emulate_single_step(regs);
1625c952c1c4SAnshuman Khandual 		}
16262517617eSMichael Neuling 		return;
1627b14b6260SMichael Ellerman 	}
1628b14b6260SMichael Ellerman 
1629172f7aaaSCyril Bur 	if (status == FSCR_TM_LG) {
1630172f7aaaSCyril Bur 		/*
1631172f7aaaSCyril Bur 		 * If we're here then the hardware is TM aware because it
1632172f7aaaSCyril Bur 		 * generated an exception with FSRM_TM set.
1633172f7aaaSCyril Bur 		 * generated an exception with FSCR_TM set.
1634172f7aaaSCyril Bur 		 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
1635172f7aaaSCyril Bur 		 * told us not to do TM, or the kernel is not built with TM
1636172f7aaaSCyril Bur 		 * support.
1637172f7aaaSCyril Bur 		 *
1638172f7aaaSCyril Bur 		 * If both of those things are true, then userspace can spam the
1639172f7aaaSCyril Bur 		 * console by triggering the printk() below just by continually
1640172f7aaaSCyril Bur 		 * doing tbegin (or any TM instruction). So in that case just
1641172f7aaaSCyril Bur 		 * send the process a SIGILL immediately.
1642172f7aaaSCyril Bur 		 */
1643172f7aaaSCyril Bur 		if (!cpu_has_feature(CPU_FTR_TM))
1644172f7aaaSCyril Bur 			goto out;
1645172f7aaaSCyril Bur 
1646172f7aaaSCyril Bur 		tm_unavailable(regs);
1647172f7aaaSCyril Bur 		return;
1648172f7aaaSCyril Bur 	}
1649172f7aaaSCyril Bur 
165093c2ec0fSBalbir Singh 	if ((hv || status >= 2) &&
165193c2ec0fSBalbir Singh 	    (status < ARRAY_SIZE(facility_strings)) &&
16522517617eSMichael Neuling 	    facility_strings[status])
16532517617eSMichael Neuling 		facility = facility_strings[status];
1654021424a1SMichael Ellerman 
1655d0c0c9a1SMichael Neuling 	/* We restore the interrupt state now */
1656d0c0c9a1SMichael Neuling 	if (!arch_irq_disabled_regs(regs))
1657d0c0c9a1SMichael Neuling 		local_irq_enable();
1658d0c0c9a1SMichael Neuling 
165993c2ec0fSBalbir Singh 	pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
166093c2ec0fSBalbir Singh 		hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);
1661d0c0c9a1SMichael Neuling 
1662172f7aaaSCyril Bur out:
1663d0c0c9a1SMichael Neuling 	if (user_mode(regs)) {
1664d0c0c9a1SMichael Neuling 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1665d0c0c9a1SMichael Neuling 		return;
1666d0c0c9a1SMichael Neuling 	}
1667d0c0c9a1SMichael Neuling 
1668021424a1SMichael Ellerman 	die("Unexpected facility unavailable exception", regs, SIGABRT);
1669d0c0c9a1SMichael Neuling }
16702517617eSMichael Neuling #endif
1671d0c0c9a1SMichael Neuling 
1672f54db641SMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1673f54db641SMichael Neuling 
1674f54db641SMichael Neuling void fp_unavailable_tm(struct pt_regs *regs)
1675f54db641SMichael Neuling {
1676f54db641SMichael Neuling 	/* Note:  This does not handle any kind of FP laziness. */
1677f54db641SMichael Neuling 
1678f54db641SMichael Neuling 	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
1679f54db641SMichael Neuling 		 regs->nip, regs->msr);
1680f54db641SMichael Neuling 
1681f54db641SMichael Neuling 	/* We can only have got here if the task started using FP after
1682f54db641SMichael Neuling 	 * beginning the transaction.  So, the transactional regs are just a
1683f54db641SMichael Neuling 	 * copy of the checkpointed ones.  But, we still need to recheckpoint
1684f54db641SMichael Neuling 	 * as we're enabling FP for the process; it will return, abort the
1685f54db641SMichael Neuling 	 * transaction, and probably retry but now with FP enabled.  So the
1686f54db641SMichael Neuling 	 * checkpointed FP registers need to be loaded.
1687f54db641SMichael Neuling 	 */
1688d31626f7SPaul Mackerras 	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1689f54db641SMichael Neuling 	/* Reclaim didn't save out any FPRs to transact_fprs. */
1690f54db641SMichael Neuling 
1691f54db641SMichael Neuling 	/* Enable FP for the task: */
1692a7771176SCyril Bur 	current->thread.load_fp = 1;
1693f54db641SMichael Neuling 
1694f54db641SMichael Neuling 	/* This loads and recheckpoints the FP registers from
1695f54db641SMichael Neuling 	 * thread.fpr[].  They will remain in registers after the
1696f54db641SMichael Neuling 	 * checkpoint so we don't need to reload them after.
16973ac8ff1cSPaul Mackerras 	 * If VMX is in use, the VRs now hold checkpointed values,
16983ac8ff1cSPaul Mackerras 	 * so we don't want to load the VRs from the thread_struct.
1699f54db641SMichael Neuling 	 */
1700eb5c3f1cSCyril Bur 	tm_recheckpoint(&current->thread);
1701f54db641SMichael Neuling }
1702f54db641SMichael Neuling 
1703f54db641SMichael Neuling void altivec_unavailable_tm(struct pt_regs *regs)
1704f54db641SMichael Neuling {
1705f54db641SMichael Neuling 	/* See the comments in fp_unavailable_tm().  This function operates
1706f54db641SMichael Neuling 	 * the same way.
1707f54db641SMichael Neuling 	 */
1708f54db641SMichael Neuling 
1709f54db641SMichael Neuling 	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx, "
1710f54db641SMichael Neuling 		 "MSR=%lx\n",
1711f54db641SMichael Neuling 		 regs->nip, regs->msr);
1712d31626f7SPaul Mackerras 	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1713a7771176SCyril Bur 	current->thread.load_vec = 1;
1714eb5c3f1cSCyril Bur 	tm_recheckpoint(&current->thread);
1715f54db641SMichael Neuling 	current->thread.used_vr = 1;
17163ac8ff1cSPaul Mackerras }
17173ac8ff1cSPaul Mackerras 
1718f54db641SMichael Neuling void vsx_unavailable_tm(struct pt_regs *regs)
1719f54db641SMichael Neuling {
1720f54db641SMichael Neuling 	/* See the comments in fp_unavailable_tm().  This works similarly,
1721f54db641SMichael Neuling 	 * though we're loading both FP and VEC registers in here.
1722f54db641SMichael Neuling 	 *
1723f54db641SMichael Neuling 	 * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
1724f54db641SMichael Neuling 	 * regs.  Either way, set MSR_VSX.
1725f54db641SMichael Neuling 	 */
1726f54db641SMichael Neuling 
1727f54db641SMichael Neuling 	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx, "
1728f54db641SMichael Neuling 		 "MSR=%lx\n",
1729f54db641SMichael Neuling 		 regs->nip, regs->msr);
1730f54db641SMichael Neuling 
17313ac8ff1cSPaul Mackerras 	current->thread.used_vsr = 1;
17323ac8ff1cSPaul Mackerras 
1733f54db641SMichael Neuling 	/* This reclaims FP and/or VR regs if they're already enabled */
1734d31626f7SPaul Mackerras 	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1735f54db641SMichael Neuling 
1736a7771176SCyril Bur 	current->thread.load_vec = 1;
1737a7771176SCyril Bur 	current->thread.load_fp = 1;
17383ac8ff1cSPaul Mackerras 
1739eb5c3f1cSCyril Bur 	tm_recheckpoint(&current->thread);
1740f54db641SMichael Neuling }
1741f54db641SMichael Neuling #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1742f54db641SMichael Neuling 
1743dc1c1ca3SStephen Rothwell void performance_monitor_exception(struct pt_regs *regs)
1744dc1c1ca3SStephen Rothwell {
174569111bacSChristoph Lameter 	__this_cpu_inc(irq_stat.pmu_irqs);
174689713ed1SAnton Blanchard 
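	/*
	 * irq_stat.pmu_irqs feeds the PMI count in /proc/interrupts;
	 * perf_irq points at whichever PMU handler is currently installed
	 * (typically via reserve_pmc_hardware()).
	 */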
1747dc1c1ca3SStephen Rothwell 	perf_irq(regs);
1748dc1c1ca3SStephen Rothwell }
1749dc1c1ca3SStephen Rothwell 
1750172ae2e7SDave Kleikamp #ifdef CONFIG_PPC_ADV_DEBUG_REGS
17513bffb652SDave Kleikamp static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
17523bffb652SDave Kleikamp {
17533bffb652SDave Kleikamp 	int changed = 0;
17543bffb652SDave Kleikamp 	/*
17553bffb652SDave Kleikamp 	 * Determine the cause of the debug event, clear the
17563bffb652SDave Kleikamp 	 * event flags and send a trap to the handler. Torez
17573bffb652SDave Kleikamp 	 */
17583bffb652SDave Kleikamp 	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
17593bffb652SDave Kleikamp 		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
17603bffb652SDave Kleikamp #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
176151ae8d4aSBharat Bhushan 		current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
17623bffb652SDave Kleikamp #endif
17633bffb652SDave Kleikamp 		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
17643bffb652SDave Kleikamp 			     5);
17653bffb652SDave Kleikamp 		changed |= 0x01;
17663bffb652SDave Kleikamp 	}  else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
17673bffb652SDave Kleikamp 		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
17683bffb652SDave Kleikamp 		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
17693bffb652SDave Kleikamp 			     6);
17703bffb652SDave Kleikamp 		changed |= 0x01;
17713bffb652SDave Kleikamp 	}  else if (debug_status & DBSR_IAC1) {
177251ae8d4aSBharat Bhushan 		current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
17733bffb652SDave Kleikamp 		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
17743bffb652SDave Kleikamp 		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
17753bffb652SDave Kleikamp 			     1);
17763bffb652SDave Kleikamp 		changed |= 0x01;
17773bffb652SDave Kleikamp 	}  else if (debug_status & DBSR_IAC2) {
177851ae8d4aSBharat Bhushan 		current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
17793bffb652SDave Kleikamp 		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
17803bffb652SDave Kleikamp 			     2);
17813bffb652SDave Kleikamp 		changed |= 0x01;
17823bffb652SDave Kleikamp 	}  else if (debug_status & DBSR_IAC3) {
178351ae8d4aSBharat Bhushan 		current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
17843bffb652SDave Kleikamp 		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
17853bffb652SDave Kleikamp 		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
17863bffb652SDave Kleikamp 			     3);
17873bffb652SDave Kleikamp 		changed |= 0x01;
17883bffb652SDave Kleikamp 	}  else if (debug_status & DBSR_IAC4) {
178951ae8d4aSBharat Bhushan 		current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
17903bffb652SDave Kleikamp 		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
17913bffb652SDave Kleikamp 			     4);
17923bffb652SDave Kleikamp 		changed |= 0x01;
17933bffb652SDave Kleikamp 	}
17943bffb652SDave Kleikamp 	/*
17953bffb652SDave Kleikamp 	 * At the point this routine was called, the MSR(DE) was turned off.
17963bffb652SDave Kleikamp 	 * Check all other debug flags and see if that bit needs to be turned
17973bffb652SDave Kleikamp 	 * back on or not.
17983bffb652SDave Kleikamp 	 */
179951ae8d4aSBharat Bhushan 	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
180051ae8d4aSBharat Bhushan 			       current->thread.debug.dbcr1))
18013bffb652SDave Kleikamp 		regs->msr |= MSR_DE;
18023bffb652SDave Kleikamp 	else
18033bffb652SDave Kleikamp 		/* Make sure the IDM flag is off */
180451ae8d4aSBharat Bhushan 		current->thread.debug.dbcr0 &= ~DBCR0_IDM;
18053bffb652SDave Kleikamp 
18063bffb652SDave Kleikamp 	if (changed & 0x01)
180751ae8d4aSBharat Bhushan 		mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
18083bffb652SDave Kleikamp }
180914cf11afSPaul Mackerras 
181003465f89SNicholas Piggin void DebugException(struct pt_regs *regs, unsigned long debug_status)
181114cf11afSPaul Mackerras {
181251ae8d4aSBharat Bhushan 	current->thread.debug.dbsr = debug_status;
18133bffb652SDave Kleikamp 
1814ec097c84SRoland McGrath 	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
1815ec097c84SRoland McGrath 	 * on server, it stops on the target of the branch. In order to simulate
1816ec097c84SRoland McGrath 	 * the server behaviour, we thus restart right away with a single step
1817ec097c84SRoland McGrath 	 * instead of stopping here when hitting a BT
1818ec097c84SRoland McGrath 	 */
1819ec097c84SRoland McGrath 	if (debug_status & DBSR_BT) {
1820ec097c84SRoland McGrath 		regs->msr &= ~MSR_DE;
1821ec097c84SRoland McGrath 
1822ec097c84SRoland McGrath 		/* Disable BT */
1823ec097c84SRoland McGrath 		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
1824ec097c84SRoland McGrath 		/* Clear the BT event */
1825ec097c84SRoland McGrath 		mtspr(SPRN_DBSR, DBSR_BT);
1826ec097c84SRoland McGrath 
1827ec097c84SRoland McGrath 		/* Do the single step trick only when coming from userspace */
1828ec097c84SRoland McGrath 		if (user_mode(regs)) {
182951ae8d4aSBharat Bhushan 			current->thread.debug.dbcr0 &= ~DBCR0_BT;
183051ae8d4aSBharat Bhushan 			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
1831ec097c84SRoland McGrath 			regs->msr |= MSR_DE;
1832ec097c84SRoland McGrath 			return;
1833ec097c84SRoland McGrath 		}
1834ec097c84SRoland McGrath 
18356cc89badSNaveen N. Rao 		if (kprobe_post_handler(regs))
18366cc89badSNaveen N. Rao 			return;
18376cc89badSNaveen N. Rao 
1838ec097c84SRoland McGrath 		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
1839ec097c84SRoland McGrath 			       5, SIGTRAP) == NOTIFY_STOP) {
1840ec097c84SRoland McGrath 			return;
1841ec097c84SRoland McGrath 		}
1842ec097c84SRoland McGrath 		if (debugger_sstep(regs))
1843ec097c84SRoland McGrath 			return;
1844ec097c84SRoland McGrath 	} else if (debug_status & DBSR_IC) { 	/* Instruction complete */
184514cf11afSPaul Mackerras 		regs->msr &= ~MSR_DE;
1846f8279621SKumar Gala 
184714cf11afSPaul Mackerras 		/* Disable instruction completion */
184814cf11afSPaul Mackerras 		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
184914cf11afSPaul Mackerras 		/* Clear the instruction completion event */
185014cf11afSPaul Mackerras 		mtspr(SPRN_DBSR, DBSR_IC);
1851f8279621SKumar Gala 
18526cc89badSNaveen N. Rao 		if (kprobe_post_handler(regs))
18536cc89badSNaveen N. Rao 			return;
18546cc89badSNaveen N. Rao 
1855f8279621SKumar Gala 		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
1856f8279621SKumar Gala 			       5, SIGTRAP) == NOTIFY_STOP) {
185714cf11afSPaul Mackerras 			return;
185814cf11afSPaul Mackerras 		}
1859f8279621SKumar Gala 
1860f8279621SKumar Gala 		if (debugger_sstep(regs))
1861f8279621SKumar Gala 			return;
1862f8279621SKumar Gala 
18633bffb652SDave Kleikamp 		if (user_mode(regs)) {
186451ae8d4aSBharat Bhushan 			current->thread.debug.dbcr0 &= ~DBCR0_IC;
186551ae8d4aSBharat Bhushan 			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
186651ae8d4aSBharat Bhushan 					       current->thread.debug.dbcr1))
18673bffb652SDave Kleikamp 				regs->msr |= MSR_DE;
18683bffb652SDave Kleikamp 			else
18693bffb652SDave Kleikamp 				/* Make sure the IDM bit is off */
187051ae8d4aSBharat Bhushan 				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
18713bffb652SDave Kleikamp 		}
1872f8279621SKumar Gala 
1873f8279621SKumar Gala 		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
18743bffb652SDave Kleikamp 	} else
18753bffb652SDave Kleikamp 		handle_debug(regs, debug_status);
187614cf11afSPaul Mackerras }
187703465f89SNicholas Piggin NOKPROBE_SYMBOL(DebugException);
1878172ae2e7SDave Kleikamp #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
187914cf11afSPaul Mackerras 
188014cf11afSPaul Mackerras #if !defined(CONFIG_TAU_INT)
188114cf11afSPaul Mackerras void TAUException(struct pt_regs *regs)
188214cf11afSPaul Mackerras {
188314cf11afSPaul Mackerras 	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
188414cf11afSPaul Mackerras 	       regs->nip, regs->msr, regs->trap, print_tainted());
188514cf11afSPaul Mackerras }
188614cf11afSPaul Mackerras #endif /* CONFIG_TAU_INT */
188714cf11afSPaul Mackerras 
188814cf11afSPaul Mackerras #ifdef CONFIG_ALTIVEC
1889dc1c1ca3SStephen Rothwell void altivec_assist_exception(struct pt_regs *regs)
189014cf11afSPaul Mackerras {
189114cf11afSPaul Mackerras 	int err;
189214cf11afSPaul Mackerras 
189314cf11afSPaul Mackerras 	if (!user_mode(regs)) {
189414cf11afSPaul Mackerras 		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
189514cf11afSPaul Mackerras 		       " at %lx\n", regs->nip);
18968dad3f92SPaul Mackerras 		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
189714cf11afSPaul Mackerras 	}
189814cf11afSPaul Mackerras 
1899dc1c1ca3SStephen Rothwell 	flush_altivec_to_thread(current);
1900dc1c1ca3SStephen Rothwell 
1901eecff81dSAnton Blanchard 	PPC_WARN_EMULATED(altivec, regs);
190214cf11afSPaul Mackerras 	err = emulate_altivec(regs);
190314cf11afSPaul Mackerras 	if (err == 0) {
190414cf11afSPaul Mackerras 		regs->nip += 4;		/* skip emulated instruction */
190514cf11afSPaul Mackerras 		emulate_single_step(regs);
190614cf11afSPaul Mackerras 		return;
190714cf11afSPaul Mackerras 	}
190814cf11afSPaul Mackerras 
190914cf11afSPaul Mackerras 	if (err == -EFAULT) {
191014cf11afSPaul Mackerras 		/* got an error reading the instruction */
191114cf11afSPaul Mackerras 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
191214cf11afSPaul Mackerras 	} else {
191314cf11afSPaul Mackerras 		/* didn't recognize the instruction */
191414cf11afSPaul Mackerras 		/* XXX quick hack for now: set the non-Java bit in the VSCR */
191576462232SChristian Dietrich 		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
191614cf11afSPaul Mackerras 				   "in %s at %lx\n", current->comm, regs->nip);
1917de79f7b9SPaul Mackerras 		current->thread.vr_state.vscr.u[3] |= 0x10000;
191814cf11afSPaul Mackerras 	}
191914cf11afSPaul Mackerras }
192014cf11afSPaul Mackerras #endif /* CONFIG_ALTIVEC */
192114cf11afSPaul Mackerras 
192214cf11afSPaul Mackerras #ifdef CONFIG_FSL_BOOKE
192314cf11afSPaul Mackerras void CacheLockingException(struct pt_regs *regs, unsigned long address,
192414cf11afSPaul Mackerras 			   unsigned long error_code)
192514cf11afSPaul Mackerras {
192614cf11afSPaul Mackerras 	/* We treat cache locking instructions from the user
192714cf11afSPaul Mackerras 	 * as priv ops, in the future we could try to do
192814cf11afSPaul Mackerras 	 * something smarter
192914cf11afSPaul Mackerras 	 */
193014cf11afSPaul Mackerras 	if (error_code & (ESR_DLK|ESR_ILK))
193114cf11afSPaul Mackerras 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
193214cf11afSPaul Mackerras 	return;
193314cf11afSPaul Mackerras }
193414cf11afSPaul Mackerras #endif /* CONFIG_FSL_BOOKE */
193514cf11afSPaul Mackerras 
193614cf11afSPaul Mackerras #ifdef CONFIG_SPE
193714cf11afSPaul Mackerras void SPEFloatingPointException(struct pt_regs *regs)
193814cf11afSPaul Mackerras {
19396a800f36SLiu Yu 	extern int do_spe_mathemu(struct pt_regs *regs);
194014cf11afSPaul Mackerras 	unsigned long spefscr;
194114cf11afSPaul Mackerras 	int fpexc_mode;
194214cf11afSPaul Mackerras 	int code = 0;
19436a800f36SLiu Yu 	int err;
19446a800f36SLiu Yu 
1945685659eeSyu liu 	flush_spe_to_thread(current);
194614cf11afSPaul Mackerras 
194714cf11afSPaul Mackerras 	spefscr = current->thread.spefscr;
194814cf11afSPaul Mackerras 	fpexc_mode = current->thread.fpexc_mode;
194914cf11afSPaul Mackerras 
195014cf11afSPaul Mackerras 	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
195114cf11afSPaul Mackerras 		code = FPE_FLTOVF;
195214cf11afSPaul Mackerras 	}
195314cf11afSPaul Mackerras 	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
195414cf11afSPaul Mackerras 		code = FPE_FLTUND;
195514cf11afSPaul Mackerras 	}
195614cf11afSPaul Mackerras 	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
195714cf11afSPaul Mackerras 		code = FPE_FLTDIV;
195814cf11afSPaul Mackerras 	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
195914cf11afSPaul Mackerras 		code = FPE_FLTINV;
196014cf11afSPaul Mackerras 	}
196114cf11afSPaul Mackerras 	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
196214cf11afSPaul Mackerras 		code = FPE_FLTRES;
196314cf11afSPaul Mackerras 
19646a800f36SLiu Yu 	err = do_spe_mathemu(regs);
19656a800f36SLiu Yu 	if (err == 0) {
19666a800f36SLiu Yu 		regs->nip += 4;		/* skip emulated instruction */
19676a800f36SLiu Yu 		emulate_single_step(regs);
196814cf11afSPaul Mackerras 		return;
196914cf11afSPaul Mackerras 	}
19706a800f36SLiu Yu 
19716a800f36SLiu Yu 	if (err == -EFAULT) {
19726a800f36SLiu Yu 		/* got an error reading the instruction */
19736a800f36SLiu Yu 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
19746a800f36SLiu Yu 	} else if (err == -EINVAL) {
19756a800f36SLiu Yu 		/* didn't recognize the instruction */
19766a800f36SLiu Yu 		printk(KERN_ERR "unrecognized spe instruction "
19776a800f36SLiu Yu 		       "in %s at %lx\n", current->comm, regs->nip);
19786a800f36SLiu Yu 	} else {
19796a800f36SLiu Yu 		_exception(SIGFPE, regs, code, regs->nip);
19806a800f36SLiu Yu 	}
19816a800f36SLiu Yu 
19826a800f36SLiu Yu 	return;
19836a800f36SLiu Yu }
19846a800f36SLiu Yu 
19856a800f36SLiu Yu void SPEFloatingPointRoundException(struct pt_regs *regs)
19866a800f36SLiu Yu {
19876a800f36SLiu Yu 	extern int speround_handler(struct pt_regs *regs);
19886a800f36SLiu Yu 	int err;
19896a800f36SLiu Yu 
19906a800f36SLiu Yu 	preempt_disable();
19916a800f36SLiu Yu 	if (regs->msr & MSR_SPE)
19926a800f36SLiu Yu 		giveup_spe(current);
19936a800f36SLiu Yu 	preempt_enable();
19946a800f36SLiu Yu 
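	/*
	 * regs->nip already points past the instruction that raised the
	 * round exception; back it up so speround_handler() can decode it.
	 * The success path below then steps past it again.
	 */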
19956a800f36SLiu Yu 	regs->nip -= 4;
19966a800f36SLiu Yu 	err = speround_handler(regs);
19976a800f36SLiu Yu 	if (err == 0) {
19986a800f36SLiu Yu 		regs->nip += 4;		/* skip emulated instruction */
19996a800f36SLiu Yu 		emulate_single_step(regs);
20006a800f36SLiu Yu 		return;
20016a800f36SLiu Yu 	}
20026a800f36SLiu Yu 
20036a800f36SLiu Yu 	if (err == -EFAULT) {
20046a800f36SLiu Yu 		/* got an error reading the instruction */
20056a800f36SLiu Yu 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
20066a800f36SLiu Yu 	} else if (err == -EINVAL) {
20076a800f36SLiu Yu 		/* didn't recognize the instruction */
20086a800f36SLiu Yu 		printk(KERN_ERR "unrecognized spe instruction "
20096a800f36SLiu Yu 		       "in %s at %lx\n", current->comm, regs->nip);
20106a800f36SLiu Yu 	} else {
20116a800f36SLiu Yu 		_exception(SIGFPE, regs, 0, regs->nip);
20126a800f36SLiu Yu 		return;
20136a800f36SLiu Yu 	}
20146a800f36SLiu Yu }
201514cf11afSPaul Mackerras #endif
201614cf11afSPaul Mackerras 
2017dc1c1ca3SStephen Rothwell /*
2018dc1c1ca3SStephen Rothwell  * We enter here if we get an unrecoverable exception, that is, one
2019dc1c1ca3SStephen Rothwell  * that happened at a point where the RI (recoverable interrupt) bit
2020dc1c1ca3SStephen Rothwell  * in the MSR is 0.  This indicates that SRR0/1 are live, and that
2021dc1c1ca3SStephen Rothwell  * we therefore lost state by taking this exception.
2022dc1c1ca3SStephen Rothwell  */
2023dc1c1ca3SStephen Rothwell void unrecoverable_exception(struct pt_regs *regs)
2024dc1c1ca3SStephen Rothwell {
2025dc1c1ca3SStephen Rothwell 	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
2026dc1c1ca3SStephen Rothwell 	       regs->trap, regs->nip);
2027dc1c1ca3SStephen Rothwell 	die("Unrecoverable exception", regs, SIGABRT);
2028dc1c1ca3SStephen Rothwell }
202915770a13SNaveen N. Rao NOKPROBE_SYMBOL(unrecoverable_exception);
2030dc1c1ca3SStephen Rothwell 
20311e18c17aSJason Gunthorpe #if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
203214cf11afSPaul Mackerras /*
203314cf11afSPaul Mackerras  * Default handler for a Watchdog exception,
203414cf11afSPaul Mackerras  * spins until a reboot occurs
203514cf11afSPaul Mackerras  */
203614cf11afSPaul Mackerras void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
203714cf11afSPaul Mackerras {
203814cf11afSPaul Mackerras 	/* Generic WatchdogHandler, implement your own */
203914cf11afSPaul Mackerras 	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
204014cf11afSPaul Mackerras 	return;
204114cf11afSPaul Mackerras }
204214cf11afSPaul Mackerras 
204314cf11afSPaul Mackerras void WatchdogException(struct pt_regs *regs)
204414cf11afSPaul Mackerras {
204514cf11afSPaul Mackerras 	printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
204614cf11afSPaul Mackerras 	WatchdogHandler(regs);
204714cf11afSPaul Mackerras }
204814cf11afSPaul Mackerras #endif
2049dc1c1ca3SStephen Rothwell 
2050dc1c1ca3SStephen Rothwell /*
2051dc1c1ca3SStephen Rothwell  * We enter here if we discover during exception entry that we are
2052dc1c1ca3SStephen Rothwell  * running in supervisor mode with a userspace value in the stack pointer.
2053dc1c1ca3SStephen Rothwell  */
2054dc1c1ca3SStephen Rothwell void kernel_bad_stack(struct pt_regs *regs)
2055dc1c1ca3SStephen Rothwell {
2056dc1c1ca3SStephen Rothwell 	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
2057dc1c1ca3SStephen Rothwell 	       regs->gpr[1], regs->nip);
2058dc1c1ca3SStephen Rothwell 	die("Bad kernel stack pointer", regs, SIGABRT);
2059dc1c1ca3SStephen Rothwell }
206015770a13SNaveen N. Rao NOKPROBE_SYMBOL(kernel_bad_stack);
206114cf11afSPaul Mackerras 
206214cf11afSPaul Mackerras void __init trap_init(void)
206314cf11afSPaul Mackerras {
206414cf11afSPaul Mackerras }
206580947e7cSGeert Uytterhoeven 
206680947e7cSGeert Uytterhoeven 
206780947e7cSGeert Uytterhoeven #ifdef CONFIG_PPC_EMULATED_STATS
206880947e7cSGeert Uytterhoeven 
206980947e7cSGeert Uytterhoeven #define WARN_EMULATED_SETUP(type)	.type = { .name = #type }
207080947e7cSGeert Uytterhoeven 
207180947e7cSGeert Uytterhoeven struct ppc_emulated ppc_emulated = {
207280947e7cSGeert Uytterhoeven #ifdef CONFIG_ALTIVEC
207380947e7cSGeert Uytterhoeven 	WARN_EMULATED_SETUP(altivec),
207480947e7cSGeert Uytterhoeven #endif
207580947e7cSGeert Uytterhoeven 	WARN_EMULATED_SETUP(dcba),
207680947e7cSGeert Uytterhoeven 	WARN_EMULATED_SETUP(dcbz),
207780947e7cSGeert Uytterhoeven 	WARN_EMULATED_SETUP(fp_pair),
207880947e7cSGeert Uytterhoeven 	WARN_EMULATED_SETUP(isel),
207980947e7cSGeert Uytterhoeven 	WARN_EMULATED_SETUP(mcrxr),
208080947e7cSGeert Uytterhoeven 	WARN_EMULATED_SETUP(mfpvr),
208180947e7cSGeert Uytterhoeven 	WARN_EMULATED_SETUP(multiple),
208280947e7cSGeert Uytterhoeven 	WARN_EMULATED_SETUP(popcntb),
208380947e7cSGeert Uytterhoeven 	WARN_EMULATED_SETUP(spe),
208480947e7cSGeert Uytterhoeven 	WARN_EMULATED_SETUP(string),
2085a3821b2aSScott Wood 	WARN_EMULATED_SETUP(sync),
208680947e7cSGeert Uytterhoeven 	WARN_EMULATED_SETUP(unaligned),
208780947e7cSGeert Uytterhoeven #ifdef CONFIG_MATH_EMULATION
208880947e7cSGeert Uytterhoeven 	WARN_EMULATED_SETUP(math),
208980947e7cSGeert Uytterhoeven #endif
209080947e7cSGeert Uytterhoeven #ifdef CONFIG_VSX
209180947e7cSGeert Uytterhoeven 	WARN_EMULATED_SETUP(vsx),
209280947e7cSGeert Uytterhoeven #endif
2093efcac658SAlexey Kardashevskiy #ifdef CONFIG_PPC64
2094efcac658SAlexey Kardashevskiy 	WARN_EMULATED_SETUP(mfdscr),
2095efcac658SAlexey Kardashevskiy 	WARN_EMULATED_SETUP(mtdscr),
2096f83319d7SAnton Blanchard 	WARN_EMULATED_SETUP(lq_stq),
20975080332cSMichael Neuling 	WARN_EMULATED_SETUP(lxvw4x),
20985080332cSMichael Neuling 	WARN_EMULATED_SETUP(lxvh8x),
20995080332cSMichael Neuling 	WARN_EMULATED_SETUP(lxvd2x),
21005080332cSMichael Neuling 	WARN_EMULATED_SETUP(lxvb16x),
2101efcac658SAlexey Kardashevskiy #endif
210280947e7cSGeert Uytterhoeven };
210380947e7cSGeert Uytterhoeven 
210480947e7cSGeert Uytterhoeven u32 ppc_warn_emulated;
210580947e7cSGeert Uytterhoeven 
210680947e7cSGeert Uytterhoeven void ppc_warn_emulated_print(const char *type)
210780947e7cSGeert Uytterhoeven {
210876462232SChristian Dietrich 	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
210980947e7cSGeert Uytterhoeven 			    type);
211080947e7cSGeert Uytterhoeven }
211180947e7cSGeert Uytterhoeven 
211280947e7cSGeert Uytterhoeven static int __init ppc_warn_emulated_init(void)
211380947e7cSGeert Uytterhoeven {
211480947e7cSGeert Uytterhoeven 	struct dentry *dir, *d;
211580947e7cSGeert Uytterhoeven 	unsigned int i;
211680947e7cSGeert Uytterhoeven 	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
211780947e7cSGeert Uytterhoeven 
211880947e7cSGeert Uytterhoeven 	if (!powerpc_debugfs_root)
211980947e7cSGeert Uytterhoeven 		return -ENODEV;
212080947e7cSGeert Uytterhoeven 
212180947e7cSGeert Uytterhoeven 	dir = debugfs_create_dir("emulated_instructions",
212280947e7cSGeert Uytterhoeven 				 powerpc_debugfs_root);
212380947e7cSGeert Uytterhoeven 	if (!dir)
212480947e7cSGeert Uytterhoeven 		return -ENOMEM;
212580947e7cSGeert Uytterhoeven 
212680947e7cSGeert Uytterhoeven 	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
212780947e7cSGeert Uytterhoeven 			       &ppc_warn_emulated);
212880947e7cSGeert Uytterhoeven 	if (!d)
212980947e7cSGeert Uytterhoeven 		goto fail;
213080947e7cSGeert Uytterhoeven 
213180947e7cSGeert Uytterhoeven 	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
213280947e7cSGeert Uytterhoeven 		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
213380947e7cSGeert Uytterhoeven 				       (u32 *)&entries[i].val.counter);
213480947e7cSGeert Uytterhoeven 		if (!d)
213580947e7cSGeert Uytterhoeven 			goto fail;
213680947e7cSGeert Uytterhoeven 	}
213780947e7cSGeert Uytterhoeven 
213880947e7cSGeert Uytterhoeven 	return 0;
213980947e7cSGeert Uytterhoeven 
214080947e7cSGeert Uytterhoeven fail:
214180947e7cSGeert Uytterhoeven 	debugfs_remove_recursive(dir);
214280947e7cSGeert Uytterhoeven 	return -ENOMEM;
214380947e7cSGeert Uytterhoeven }
214480947e7cSGeert Uytterhoeven 
214580947e7cSGeert Uytterhoeven device_initcall(ppc_warn_emulated_init);
214680947e7cSGeert Uytterhoeven 
214780947e7cSGeert Uytterhoeven #endif /* CONFIG_PPC_EMULATED_STATS */
2148