xref: /linux/arch/powerpc/kernel/traps.c (revision be68fb64f763b7b6ddb202e0a931f41ae62f71b0)
1 /*
2  *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
3  *  Copyright 2007-2010 Freescale Semiconductor, Inc.
4  *
5  *  This program is free software; you can redistribute it and/or
6  *  modify it under the terms of the GNU General Public License
7  *  as published by the Free Software Foundation; either version
8  *  2 of the License, or (at your option) any later version.
9  *
10  *  Modified by Cort Dougan (cort@cs.nmt.edu)
11  *  and Paul Mackerras (paulus@samba.org)
12  */
13 
14 /*
15  * This file handles the architecture-dependent parts of hardware exceptions
16  */
17 
18 #include <linux/errno.h>
19 #include <linux/sched.h>
20 #include <linux/sched/debug.h>
21 #include <linux/kernel.h>
22 #include <linux/mm.h>
23 #include <linux/pkeys.h>
24 #include <linux/stddef.h>
25 #include <linux/unistd.h>
26 #include <linux/ptrace.h>
27 #include <linux/user.h>
28 #include <linux/interrupt.h>
29 #include <linux/init.h>
30 #include <linux/extable.h>
31 #include <linux/module.h>	/* print_modules */
32 #include <linux/prctl.h>
33 #include <linux/delay.h>
34 #include <linux/kprobes.h>
35 #include <linux/kexec.h>
36 #include <linux/backlight.h>
37 #include <linux/bug.h>
38 #include <linux/kdebug.h>
39 #include <linux/ratelimit.h>
40 #include <linux/context_tracking.h>
41 #include <linux/smp.h>
42 
43 #include <asm/emulated_ops.h>
44 #include <asm/pgtable.h>
45 #include <linux/uaccess.h>
46 #include <asm/debugfs.h>
47 #include <asm/io.h>
48 #include <asm/machdep.h>
49 #include <asm/rtas.h>
50 #include <asm/pmc.h>
51 #include <asm/reg.h>
52 #ifdef CONFIG_PMAC_BACKLIGHT
53 #include <asm/backlight.h>
54 #endif
55 #ifdef CONFIG_PPC64
56 #include <asm/firmware.h>
57 #include <asm/processor.h>
58 #include <asm/tm.h>
59 #endif
60 #include <asm/kexec.h>
61 #include <asm/ppc-opcode.h>
62 #include <asm/rio.h>
63 #include <asm/fadump.h>
64 #include <asm/switch_to.h>
65 #include <asm/tm.h>
66 #include <asm/debug.h>
67 #include <asm/asm-prototypes.h>
68 #include <asm/hmi.h>
69 #include <sysdev/fsl_pci.h>
70 #include <asm/kprobes.h>
71 
72 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
73 int (*__debugger)(struct pt_regs *regs) __read_mostly;
74 int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
75 int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
76 int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
77 int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
78 int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
79 int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;
80 
81 EXPORT_SYMBOL(__debugger);
82 EXPORT_SYMBOL(__debugger_ipi);
83 EXPORT_SYMBOL(__debugger_bpt);
84 EXPORT_SYMBOL(__debugger_sstep);
85 EXPORT_SYMBOL(__debugger_iabr_match);
86 EXPORT_SYMBOL(__debugger_break_match);
87 EXPORT_SYMBOL(__debugger_fault_handler);
88 #endif
89 
90 /* Transactional Memory trap debug */
91 #ifdef TM_DEBUG_SW
92 #define TM_DEBUG(x...) printk(KERN_INFO x)
93 #else
94 #define TM_DEBUG(x...) do { } while(0)
95 #endif
96 
97 /*
98  * Trap & Exception support
99  */
100 
101 #ifdef CONFIG_PMAC_BACKLIGHT
102 static void pmac_backlight_unblank(void)
103 {
104 	mutex_lock(&pmac_backlight_mutex);
105 	if (pmac_backlight) {
106 		struct backlight_properties *props;
107 
108 		props = &pmac_backlight->props;
109 		props->brightness = props->max_brightness;
110 		props->power = FB_BLANK_UNBLANK;
111 		backlight_update_status(pmac_backlight);
112 	}
113 	mutex_unlock(&pmac_backlight_mutex);
114 }
115 #else
116 static inline void pmac_backlight_unblank(void) { }
117 #endif
118 
119 /*
120  * If oops/die is expected to crash the machine, return true here.
121  *
122  * This should not be expected to be 100% accurate; there may be
123  * notifiers registered or other unexpected conditions that may bring
124  * down the kernel. Or if the current process in the kernel is holding
125  * locks or has other critical state, the kernel may become effectively
126  * unusable anyway.
127  */
128 bool die_will_crash(void)
129 {
130 	if (should_fadump_crash())
131 		return true;
132 	if (kexec_should_crash(current))
133 		return true;
134 	if (in_interrupt() || panic_on_oops ||
135 			!current->pid || is_global_init(current))
136 		return true;
137 
138 	return false;
139 }
140 
141 static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
142 static int die_owner = -1;
143 static unsigned int die_nest_count;
144 static int die_counter;
145 
146 static unsigned long oops_begin(struct pt_regs *regs)
147 {
148 	int cpu;
149 	unsigned long flags;
150 
151 	oops_enter();
152 
153 	/* racy, but better than risking deadlock. */
154 	raw_local_irq_save(flags);
155 	cpu = smp_processor_id();
156 	if (!arch_spin_trylock(&die_lock)) {
157 		if (cpu == die_owner)
158 			/* nested oops. should stop eventually */;
159 		else
160 			arch_spin_lock(&die_lock);
161 	}
162 	die_nest_count++;
163 	die_owner = cpu;
164 	console_verbose();
165 	bust_spinlocks(1);
166 	if (machine_is(powermac))
167 		pmac_backlight_unblank();
168 	return flags;
169 }
170 NOKPROBE_SYMBOL(oops_begin);
171 
172 static void oops_end(unsigned long flags, struct pt_regs *regs,
173 			       int signr)
174 {
175 	bust_spinlocks(0);
176 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
177 	die_nest_count--;
178 	oops_exit();
179 	printk("\n");
180 	if (!die_nest_count) {
181 		/* Nest count reaches zero, release the lock. */
182 		die_owner = -1;
183 		arch_spin_unlock(&die_lock);
184 	}
185 	raw_local_irq_restore(flags);
186 
187 	crash_fadump(regs, "die oops");
188 
189 	if (kexec_should_crash(current))
190 		crash_kexec(regs);
191 
192 	if (!signr)
193 		return;
194 
195 	/*
196 	 * While our oops output is serialised by a spinlock, output
197 	 * from panic() called below can race and corrupt it. If we
198 	 * know we are going to panic, delay for 1 second so we have a
199 	 * chance to get clean backtraces from all CPUs that are oopsing.
200 	 */
201 	if (in_interrupt() || panic_on_oops || !current->pid ||
202 	    is_global_init(current)) {
203 		mdelay(MSEC_PER_SEC);
204 	}
205 
206 	if (in_interrupt())
207 		panic("Fatal exception in interrupt");
208 	if (panic_on_oops)
209 		panic("Fatal exception");
210 	do_exit(signr);
211 }
212 NOKPROBE_SYMBOL(oops_end);
213 
214 static int __die(const char *str, struct pt_regs *regs, long err)
215 {
216 	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
217 
218 	if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
219 		printk("LE ");
220 	else
221 		printk("BE ");
222 
223 	if (IS_ENABLED(CONFIG_PREEMPT))
224 		pr_cont("PREEMPT ");
225 
226 	if (IS_ENABLED(CONFIG_SMP))
227 		pr_cont("SMP NR_CPUS=%d ", NR_CPUS);
228 
229 	if (debug_pagealloc_enabled())
230 		pr_cont("DEBUG_PAGEALLOC ");
231 
232 	if (IS_ENABLED(CONFIG_NUMA))
233 		pr_cont("NUMA ");
234 
235 	pr_cont("%s\n", ppc_md.name ? ppc_md.name : "");
236 
237 	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
238 		return 1;
239 
240 	print_modules();
241 	show_regs(regs);
242 
243 	return 0;
244 }
245 NOKPROBE_SYMBOL(__die);
246 
247 void die(const char *str, struct pt_regs *regs, long err)
248 {
249 	unsigned long flags;
250 
251 	if (debugger(regs))
252 		return;
253 
254 	flags = oops_begin(regs);
255 	if (__die(str, regs, err))
256 		err = 0;
257 	oops_end(flags, regs, err);
258 }
259 NOKPROBE_SYMBOL(die);
260 
261 void user_single_step_siginfo(struct task_struct *tsk,
262 				struct pt_regs *regs, siginfo_t *info)
263 {
264 	memset(info, 0, sizeof(*info));
265 	info->si_signo = SIGTRAP;
266 	info->si_code = TRAP_TRACE;
267 	info->si_addr = (void __user *)regs->nip;
268 }
269 
270 
271 void _exception_pkey(int signr, struct pt_regs *regs, int code,
272 		unsigned long addr, int key)
273 {
274 	siginfo_t info;
275 	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
276 			"at %08lx nip %08lx lr %08lx code %x\n";
277 	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
278 			"at %016lx nip %016lx lr %016lx code %x\n";
279 
280 	if (!user_mode(regs)) {
281 		die("Exception in kernel mode", regs, signr);
282 		return;
283 	}
284 
285 	if (show_unhandled_signals && unhandled_signal(current, signr)) {
286 		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
287 				   current->comm, current->pid, signr,
288 				   addr, regs->nip, regs->link, code);
289 	}
290 
291 	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
292 		local_irq_enable();
293 
294 	current->thread.trap_nr = code;
295 
296 	/*
297 	 * Save all the pkey registers AMR/IAMR/UAMOR; e.g. core dumps need
298 	 * to capture their contents if the task gets killed.
299 	 */
300 	thread_pkey_regs_save(&current->thread);
301 
302 	memset(&info, 0, sizeof(info));
303 	info.si_signo = signr;
304 	info.si_code = code;
305 	info.si_addr = (void __user *) addr;
306 	info.si_pkey = key;
307 
308 	force_sig_info(signr, &info, current);
309 }
310 
311 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
312 {
313 	_exception_pkey(signr, regs, code, addr, 0);
314 }
315 
316 void system_reset_exception(struct pt_regs *regs)
317 {
318 	/*
319 	 * Avoid crashes in case of nested NMI exceptions. Recoverability
320 	 * is determined by RI and in_nmi
321 	 */
322 	bool nested = in_nmi();
323 	if (!nested)
324 		nmi_enter();
325 
326 	__this_cpu_inc(irq_stat.sreset_irqs);
327 
328 	/* See if there are any machine-dependent calls */
329 	if (ppc_md.system_reset_exception) {
330 		if (ppc_md.system_reset_exception(regs))
331 			goto out;
332 	}
333 
334 	if (debugger(regs))
335 		goto out;
336 
337 	/*
338 	 * A system reset is a request to dump, so we always send
339 	 * it through the crashdump code (if fadump or kdump are
340 	 * registered).
341 	 */
342 	crash_fadump(regs, "System Reset");
343 
344 	crash_kexec(regs);
345 
346 	/*
347 	 * We aren't the primary crash CPU. We need to send it
348 	 * to a holding pattern to avoid it ending up in the panic
349 	 * code.
350 	 */
351 	crash_kexec_secondary(regs);
352 
353 	/*
354 	 * No debugger or crash dump registered, print logs then
355 	 * panic.
356 	 */
357 	die("System Reset", regs, SIGABRT);
358 
359 	mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */
360 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
361 	nmi_panic(regs, "System Reset");
362 
363 out:
364 #ifdef CONFIG_PPC_BOOK3S_64
365 	BUG_ON(get_paca()->in_nmi == 0);
366 	if (get_paca()->in_nmi > 1)
367 		nmi_panic(regs, "Unrecoverable nested System Reset");
368 #endif
369 	/* Must die if the interrupt is not recoverable */
370 	if (!(regs->msr & MSR_RI))
371 		nmi_panic(regs, "Unrecoverable System Reset");
372 
373 	if (!nested)
374 		nmi_exit();
375 
376 	/* What should we do here? We could issue a shutdown or hard reset. */
377 }
378 
379 /*
380  * I/O accesses can cause machine checks on powermacs.
381  * Check if the NIP corresponds to the address of a sync
382  * instruction for which there is an entry in the exception
383  * table.
384  * Note that the 601 only takes a machine check on TEA
385  * (transfer error ack) signal assertion, and does not
386  * set any of the top 16 bits of SRR1.
387  *  -- paulus.
388  */
389 static inline int check_io_access(struct pt_regs *regs)
390 {
391 #ifdef CONFIG_PPC32
392 	unsigned long msr = regs->msr;
393 	const struct exception_table_entry *entry;
394 	unsigned int *nip = (unsigned int *)regs->nip;
395 
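	/*
	 * SRR1 bit 0x80000 flags a machine check signal and 0x40000 a
	 * transfer error ack (TEA); the "top halfword all zero" case
	 * covers the 601, which sets none of the upper SRR1 bits (see
	 * the comment above this function).
	 */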
396 	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
397 	    && (entry = search_exception_tables(regs->nip)) != NULL) {
398 		/*
399 		 * Check that it's a sync instruction, or somewhere
400 		 * in the twi; isync; nop sequence that inb/inw/inl uses.
401 		 * As the address is in the exception table
402 		 * we should be able to read the instr there.
403 		 * For the debug message, we look at the preceding
404 		 * load or store.
405 		 */
406 		if (*nip == PPC_INST_NOP)
407 			nip -= 2;
408 		else if (*nip == PPC_INST_ISYNC)
409 			--nip;
410 		if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
411 			unsigned int rb;
412 
413 			--nip;
414 			rb = (*nip >> 11) & 0x1f;
415 			printk(KERN_DEBUG "%s bad port %lx at %p\n",
416 			       (*nip & 0x100)? "OUT to": "IN from",
417 			       regs->gpr[rb] - _IO_BASE, nip);
418 			regs->msr |= MSR_RI;
419 			regs->nip = extable_fixup(entry);
420 			return 1;
421 		}
422 	}
423 #endif /* CONFIG_PPC32 */
424 	return 0;
425 }
426 
427 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
428 /* On 4xx, the reason for the machine check or program exception
429    is in the ESR. */
430 #define get_reason(regs)	((regs)->dsisr)
431 #define REASON_FP		ESR_FP
432 #define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
433 #define REASON_PRIVILEGED	ESR_PPR
434 #define REASON_TRAP		ESR_PTR
435 
436 /* single-step stuff */
437 #define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
438 #define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)
439 
440 #else
441 /* On non-4xx, the reason for the machine check or program
442    exception is in the MSR. */
443 #define get_reason(regs)	((regs)->msr)
444 #define REASON_TM		SRR1_PROGTM
445 #define REASON_FP		SRR1_PROGFPE
446 #define REASON_ILLEGAL		SRR1_PROGILL
447 #define REASON_PRIVILEGED	SRR1_PROGPRIV
448 #define REASON_TRAP		SRR1_PROGTRAP
449 
450 #define single_stepping(regs)	((regs)->msr & MSR_SE)
451 #define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
452 #endif
453 
454 #if defined(CONFIG_E500)
455 int machine_check_e500mc(struct pt_regs *regs)
456 {
457 	unsigned long mcsr = mfspr(SPRN_MCSR);
458 	unsigned long pvr = mfspr(SPRN_PVR);
459 	unsigned long reason = mcsr;
460 	int recoverable = 1;
461 
462 	if (reason & MCSR_LD) {
463 		recoverable = fsl_rio_mcheck_exception(regs);
464 		if (recoverable == 1)
465 			goto silent_out;
466 	}
467 
468 	printk("Machine check in kernel mode.\n");
469 	printk("Caused by (from MCSR=%lx): ", reason);
470 
471 	if (reason & MCSR_MCP)
472 		printk("Machine Check Signal\n");
473 
474 	if (reason & MCSR_ICPERR) {
475 		printk("Instruction Cache Parity Error\n");
476 
477 		/*
478 		 * This is recoverable by invalidating the i-cache.
479 		 */
480 		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
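		/* ICFI is cleared by hardware once the flash invalidate completes */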
481 		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
482 			;
483 
484 		/*
485 		 * This will generally be accompanied by an instruction
486 		 * fetch error report -- only treat MCSR_IF as fatal
487 		 * if it wasn't due to an L1 parity error.
488 		 */
489 		reason &= ~MCSR_IF;
490 	}
491 
492 	if (reason & MCSR_DCPERR_MC) {
493 		printk("Data Cache Parity Error\n");
494 
495 		/*
496 		 * In write shadow mode we auto-recover from the error, but it
497 		 * may still get logged and cause a machine check.  We should
498 		 * only treat the non-write shadow case as non-recoverable.
499 		 */
500 		/* On e6500 core, L1 DCWS (Data cache write shadow mode) bit
501 		 * is not implemented but L1 data cache always runs in write
502 		 * shadow mode. Hence on data cache parity errors HW will
503 		 * automatically invalidate the L1 Data Cache.
504 		 */
505 		if (PVR_VER(pvr) != PVR_VER_E6500) {
506 			if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
507 				recoverable = 0;
508 		}
509 	}
510 
511 	if (reason & MCSR_L2MMU_MHIT) {
512 		printk("Hit on multiple TLB entries\n");
513 		recoverable = 0;
514 	}
515 
516 	if (reason & MCSR_NMI)
517 		printk("Non-maskable interrupt\n");
518 
519 	if (reason & MCSR_IF) {
520 		printk("Instruction Fetch Error Report\n");
521 		recoverable = 0;
522 	}
523 
524 	if (reason & MCSR_LD) {
525 		printk("Load Error Report\n");
526 		recoverable = 0;
527 	}
528 
529 	if (reason & MCSR_ST) {
530 		printk("Store Error Report\n");
531 		recoverable = 0;
532 	}
533 
534 	if (reason & MCSR_LDG) {
535 		printk("Guarded Load Error Report\n");
536 		recoverable = 0;
537 	}
538 
539 	if (reason & MCSR_TLBSYNC)
540 		printk("Simultaneous tlbsync operations\n");
541 
542 	if (reason & MCSR_BSL2_ERR) {
543 		printk("Level 2 Cache Error\n");
544 		recoverable = 0;
545 	}
546 
547 	if (reason & MCSR_MAV) {
548 		u64 addr;
549 
550 		addr = mfspr(SPRN_MCAR);
551 		addr |= (u64)mfspr(SPRN_MCARU) << 32;
552 
553 		printk("Machine Check %s Address: %#llx\n",
554 		       reason & MCSR_MEA ? "Effective" : "Physical", addr);
555 	}
556 
557 silent_out:
558 	mtspr(SPRN_MCSR, mcsr);
559 	return mfspr(SPRN_MCSR) == 0 && recoverable;
560 }
561 
562 int machine_check_e500(struct pt_regs *regs)
563 {
564 	unsigned long reason = mfspr(SPRN_MCSR);
565 
566 	if (reason & MCSR_BUS_RBERR) {
567 		if (fsl_rio_mcheck_exception(regs))
568 			return 1;
569 		if (fsl_pci_mcheck_exception(regs))
570 			return 1;
571 	}
572 
573 	printk("Machine check in kernel mode.\n");
574 	printk("Caused by (from MCSR=%lx): ", reason);
575 
576 	if (reason & MCSR_MCP)
577 		printk("Machine Check Signal\n");
578 	if (reason & MCSR_ICPERR)
579 		printk("Instruction Cache Parity Error\n");
580 	if (reason & MCSR_DCP_PERR)
581 		printk("Data Cache Push Parity Error\n");
582 	if (reason & MCSR_DCPERR)
583 		printk("Data Cache Parity Error\n");
584 	if (reason & MCSR_BUS_IAERR)
585 		printk("Bus - Instruction Address Error\n");
586 	if (reason & MCSR_BUS_RAERR)
587 		printk("Bus - Read Address Error\n");
588 	if (reason & MCSR_BUS_WAERR)
589 		printk("Bus - Write Address Error\n");
590 	if (reason & MCSR_BUS_IBERR)
591 		printk("Bus - Instruction Data Error\n");
592 	if (reason & MCSR_BUS_RBERR)
593 		printk("Bus - Read Data Bus Error\n");
594 	if (reason & MCSR_BUS_WBERR)
595 		printk("Bus - Write Data Bus Error\n");
596 	if (reason & MCSR_BUS_IPERR)
597 		printk("Bus - Instruction Parity Error\n");
598 	if (reason & MCSR_BUS_RPERR)
599 		printk("Bus - Read Parity Error\n");
600 
601 	return 0;
602 }
603 
604 int machine_check_generic(struct pt_regs *regs)
605 {
606 	return 0;
607 }
608 #elif defined(CONFIG_E200)
609 int machine_check_e200(struct pt_regs *regs)
610 {
611 	unsigned long reason = mfspr(SPRN_MCSR);
612 
613 	printk("Machine check in kernel mode.\n");
614 	printk("Caused by (from MCSR=%lx): ", reason);
615 
616 	if (reason & MCSR_MCP)
617 		printk("Machine Check Signal\n");
618 	if (reason & MCSR_CP_PERR)
619 		printk("Cache Push Parity Error\n");
620 	if (reason & MCSR_CPERR)
621 		printk("Cache Parity Error\n");
622 	if (reason & MCSR_EXCP_ERR)
623 		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
624 	if (reason & MCSR_BUS_IRERR)
625 		printk("Bus - Read Bus Error on instruction fetch\n");
626 	if (reason & MCSR_BUS_DRERR)
627 		printk("Bus - Read Bus Error on data load\n");
628 	if (reason & MCSR_BUS_WRERR)
629 		printk("Bus - Write Bus Error on buffered store or cache line push\n");
630 
631 	return 0;
632 }
633 #elif defined(CONFIG_PPC32)
634 int machine_check_generic(struct pt_regs *regs)
635 {
636 	unsigned long reason = regs->msr;
637 
638 	printk("Machine check in kernel mode.\n");
639 	printk("Caused by (from SRR1=%lx): ", reason);
640 	switch (reason & 0x601F0000) {
641 	case 0x80000:
642 		printk("Machine check signal\n");
643 		break;
644 	case 0:		/* for 601 */
645 	case 0x40000:
646 	case 0x140000:	/* 7450 MSS error and TEA */
647 		printk("Transfer error ack signal\n");
648 		break;
649 	case 0x20000:
650 		printk("Data parity error signal\n");
651 		break;
652 	case 0x10000:
653 		printk("Address parity error signal\n");
654 		break;
655 	case 0x20000000:
656 		printk("L1 Data Cache error\n");
657 		break;
658 	case 0x40000000:
659 		printk("L1 Instruction Cache error\n");
660 		break;
661 	case 0x00100000:
662 		printk("L2 data cache parity error\n");
663 		break;
664 	default:
665 		printk("Unknown values in msr\n");
666 	}
667 	return 0;
668 }
669 #endif /* everything else */
670 
671 void machine_check_exception(struct pt_regs *regs)
672 {
673 	int recover = 0;
674 	bool nested = in_nmi();
675 	if (!nested)
676 		nmi_enter();
677 
678 	/* 64s accounts the mce in machine_check_early when in HVMODE */
679 	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !cpu_has_feature(CPU_FTR_HVMODE))
680 		__this_cpu_inc(irq_stat.mce_exceptions);
681 
682 	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
683 
684 	/* See if there are any machine-dependent calls. In theory, we would
685 	 * want to call the CPU first, and call the ppc_md. one if the CPU
686 	 * one returns a positive number. However, there is existing code
687 	 * that assumes the board gets a first chance, so let's keep it
688 	 * that way for now and fix things later. --BenH.
689 	 */
690 	if (ppc_md.machine_check_exception)
691 		recover = ppc_md.machine_check_exception(regs);
692 	else if (cur_cpu_spec->machine_check)
693 		recover = cur_cpu_spec->machine_check(regs);
694 
695 	if (recover > 0)
696 		goto bail;
697 
698 	if (debugger_fault_handler(regs))
699 		goto bail;
700 
701 	if (check_io_access(regs))
702 		goto bail;
703 
704 	die("Machine check", regs, SIGBUS);
705 
706 	/* Must die if the interrupt is not recoverable */
707 	if (!(regs->msr & MSR_RI))
708 		nmi_panic(regs, "Unrecoverable Machine check");
709 
710 bail:
711 	if (!nested)
712 		nmi_exit();
713 }
714 
715 void SMIException(struct pt_regs *regs)
716 {
717 	die("System Management Interrupt", regs, SIGABRT);
718 }
719 
720 #ifdef CONFIG_VSX
721 static void p9_hmi_special_emu(struct pt_regs *regs)
722 {
723 	unsigned int ra, rb, t, i, sel, instr, rc;
724 	const void __user *addr;
725 	u8 vbuf[16], *vdst;
726 	unsigned long ea, msr, msr_mask;
727 	bool swap;
728 
729 	if (__get_user_inatomic(instr, (unsigned int __user *)regs->nip))
730 		return;
731 
732 	/*
733 	 * lxvb16x	opcode: 0x7c0006d8
734 	 * lxvd2x	opcode: 0x7c000698
735 	 * lxvh8x	opcode: 0x7c000658
736 	 * lxvw4x	opcode: 0x7c000618
737 	 */
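	/*
	 * The four opcodes above differ only in two extended-opcode bits
	 * (plus the low TX bit that selects the upper VSRs), so a single
	 * mask/compare matches all of them.
	 */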
738 	if ((instr & 0xfc00073e) != 0x7c000618) {
739 		pr_devel("HMI vec emu: not vector CI %i:%s[%d] nip=%016lx"
740 			 " instr=%08x\n",
741 			 smp_processor_id(), current->comm, current->pid,
742 			 regs->nip, instr);
743 		return;
744 	}
745 
746 	/* Grab vector registers into the task struct */
747 	msr = regs->msr; /* Grab msr before we flush the bits */
748 	flush_vsx_to_thread(current);
749 	enable_kernel_altivec();
750 
751 	/*
752 	 * Is userspace running with a different endianness? (This is rare
753 	 * but not impossible.)
754 	 */
755 	swap = (msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
756 
757 	/* Decode the instruction */
758 	ra = (instr >> 16) & 0x1f;
759 	rb = (instr >> 11) & 0x1f;
760 	t = (instr >> 21) & 0x1f;
761 	if (instr & 1)
762 		vdst = (u8 *)&current->thread.vr_state.vr[t];
763 	else
764 		vdst = (u8 *)&current->thread.fp_state.fpr[t][0];
765 
766 	/* Grab the vector address */
767 	ea = regs->gpr[rb] + (ra ? regs->gpr[ra] : 0);
768 	if (is_32bit_task())
769 		ea &= 0xfffffffful;
770 	addr = (__force const void __user *)ea;
771 
772 	/* Check it */
773 	if (!access_ok(VERIFY_READ, addr, 16)) {
774 		pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx"
775 			 " instr=%08x addr=%016lx\n",
776 			 smp_processor_id(), current->comm, current->pid,
777 			 regs->nip, instr, (unsigned long)addr);
778 		return;
779 	}
780 
781 	/* Read the vector */
782 	rc = 0;
783 	if ((unsigned long)addr & 0xfUL)
784 		/* unaligned case */
785 		rc = __copy_from_user_inatomic(vbuf, addr, 16);
786 	else
787 		__get_user_atomic_128_aligned(vbuf, addr, rc);
788 	if (rc) {
789 		pr_devel("HMI vec emu: page fault %i:%s[%d] nip=%016lx"
790 			 " instr=%08x addr=%016lx\n",
791 			 smp_processor_id(), current->comm, current->pid,
792 			 regs->nip, instr, (unsigned long)addr);
793 		return;
794 	}
795 
796 	pr_devel("HMI vec emu: emulated vector CI %i:%s[%d] nip=%016lx"
797 		 " instr=%08x addr=%016lx\n",
798 		 smp_processor_id(), current->comm, current->pid, regs->nip,
799 		 instr, (unsigned long) addr);
800 
801 	/* Grab instruction "selector" */
802 	sel = (instr >> 6) & 3;
803 
804 	/*
805 	 * Check to make sure the facility is actually enabled. This
806 	 * could happen if we get a false positive hit.
807 	 *
808 	 * lxvd2x/lxvw4x always check MSR VSX sel = 0,2
809 	 * lxvh8x/lxvb16x check MSR VSX or VEC depending on VSR used sel = 1,3
810 	 */
811 	msr_mask = MSR_VSX;
812 	if ((sel & 1) && (instr & 1)) /* lxvh8x & lxvb16x + VSR >= 32 */
813 		msr_mask = MSR_VEC;
814 	if (!(msr & msr_mask)) {
815 		pr_devel("HMI vec emu: MSR fac clear %i:%s[%d] nip=%016lx"
816 			 " instr=%08x msr:%016lx\n",
817 			 smp_processor_id(), current->comm, current->pid,
818 			 regs->nip, instr, msr);
819 		return;
820 	}
821 
822 	/* Do logging here before we modify sel based on endian */
823 	switch (sel) {
824 	case 0:	/* lxvw4x */
825 		PPC_WARN_EMULATED(lxvw4x, regs);
826 		break;
827 	case 1: /* lxvh8x */
828 		PPC_WARN_EMULATED(lxvh8x, regs);
829 		break;
830 	case 2: /* lxvd2x */
831 		PPC_WARN_EMULATED(lxvd2x, regs);
832 		break;
833 	case 3: /* lxvb16x */
834 		PPC_WARN_EMULATED(lxvb16x, regs);
835 		break;
836 	}
837 
838 #ifdef __LITTLE_ENDIAN__
839 	/*
840 	 * An LE kernel stores the vector in the task struct as an LE
841 	 * byte array (effectively swapping both the components and
842 	 * the content of the components). Those instructions expect
843 	 * the components to remain in ascending address order, so we
844 	 * swap them back.
845 	 *
846 	 * If we are running a BE user space, the expectation is that
847 	 * of a simple memcpy, so forcing the emulation to look like
848 	 * a lxvb16x should do the trick.
849 	 */
850 	if (swap)
851 		sel = 3;
852 
853 	switch (sel) {
854 	case 0:	/* lxvw4x */
855 		for (i = 0; i < 4; i++)
856 			((u32 *)vdst)[i] = ((u32 *)vbuf)[3-i];
857 		break;
858 	case 1: /* lxvh8x */
859 		for (i = 0; i < 8; i++)
860 			((u16 *)vdst)[i] = ((u16 *)vbuf)[7-i];
861 		break;
862 	case 2: /* lxvd2x */
863 		for (i = 0; i < 2; i++)
864 			((u64 *)vdst)[i] = ((u64 *)vbuf)[1-i];
865 		break;
866 	case 3: /* lxvb16x */
867 		for (i = 0; i < 16; i++)
868 			vdst[i] = vbuf[15-i];
869 		break;
870 	}
871 #else /* __LITTLE_ENDIAN__ */
872 	/* On a big endian kernel, a BE userspace only needs a memcpy */
873 	if (!swap)
874 		sel = 3;
875 
876 	/* Otherwise, we need to swap the content of the components */
877 	switch (sel) {
878 	case 0:	/* lxvw4x */
879 		for (i = 0; i < 4; i++)
880 			((u32 *)vdst)[i] = cpu_to_le32(((u32 *)vbuf)[i]);
881 		break;
882 	case 1: /* lxvh8x */
883 		for (i = 0; i < 8; i++)
884 			((u16 *)vdst)[i] = cpu_to_le16(((u16 *)vbuf)[i]);
885 		break;
886 	case 2: /* lxvd2x */
887 		for (i = 0; i < 2; i++)
888 			((u64 *)vdst)[i] = cpu_to_le64(((u64 *)vbuf)[i]);
889 		break;
890 	case 3: /* lxvb16x */
891 		memcpy(vdst, vbuf, 16);
892 		break;
893 	}
894 #endif /* !__LITTLE_ENDIAN__ */
895 
896 	/* Go to next instruction */
897 	regs->nip += 4;
898 }
899 #endif /* CONFIG_VSX */
900 
901 void handle_hmi_exception(struct pt_regs *regs)
902 {
903 	struct pt_regs *old_regs;
904 
905 	old_regs = set_irq_regs(regs);
906 	irq_enter();
907 
908 #ifdef CONFIG_VSX
909 	/* Real mode flagged P9 special emu is needed */
910 	if (local_paca->hmi_p9_special_emu) {
911 		local_paca->hmi_p9_special_emu = 0;
912 
913 		/*
914 		 * We don't want to take page faults while doing the
915 		 * emulation; we just replay the instruction if necessary.
916 		 */
917 		pagefault_disable();
918 		p9_hmi_special_emu(regs);
919 		pagefault_enable();
920 	}
921 #endif /* CONFIG_VSX */
922 
923 	if (ppc_md.handle_hmi_exception)
924 		ppc_md.handle_hmi_exception(regs);
925 
926 	irq_exit();
927 	set_irq_regs(old_regs);
928 }
929 
930 void unknown_exception(struct pt_regs *regs)
931 {
932 	enum ctx_state prev_state = exception_enter();
933 
934 	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
935 	       regs->nip, regs->msr, regs->trap);
936 
937 	_exception(SIGTRAP, regs, 0, 0);
938 
939 	exception_exit(prev_state);
940 }
941 
942 void instruction_breakpoint_exception(struct pt_regs *regs)
943 {
944 	enum ctx_state prev_state = exception_enter();
945 
946 	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
947 					5, SIGTRAP) == NOTIFY_STOP)
948 		goto bail;
949 	if (debugger_iabr_match(regs))
950 		goto bail;
951 	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
952 
953 bail:
954 	exception_exit(prev_state);
955 }
956 
957 void RunModeException(struct pt_regs *regs)
958 {
959 	_exception(SIGTRAP, regs, 0, 0);
960 }
961 
962 void single_step_exception(struct pt_regs *regs)
963 {
964 	enum ctx_state prev_state = exception_enter();
965 
966 	clear_single_step(regs);
967 
968 	if (kprobe_post_handler(regs))
969 		return;
970 
971 	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
972 					5, SIGTRAP) == NOTIFY_STOP)
973 		goto bail;
974 	if (debugger_sstep(regs))
975 		goto bail;
976 
977 	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
978 
979 bail:
980 	exception_exit(prev_state);
981 }
982 NOKPROBE_SYMBOL(single_step_exception);
983 
984 /*
985  * After we have successfully emulated an instruction, we have to
986  * check if the instruction was being single-stepped, and if so,
987  * pretend we got a single-step exception.  This was pointed out
988  * by Kumar Gala.  -- paulus
989  */
990 static void emulate_single_step(struct pt_regs *regs)
991 {
992 	if (single_stepping(regs))
993 		single_step_exception(regs);
994 }
995 
996 static inline int __parse_fpscr(unsigned long fpscr)
997 {
998 	int ret = 0;
999 
1000 	/* Invalid operation */
1001 	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
1002 		ret = FPE_FLTINV;
1003 
1004 	/* Overflow */
1005 	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
1006 		ret = FPE_FLTOVF;
1007 
1008 	/* Underflow */
1009 	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
1010 		ret = FPE_FLTUND;
1011 
1012 	/* Divide by zero */
1013 	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
1014 		ret = FPE_FLTDIV;
1015 
1016 	/* Inexact result */
1017 	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
1018 		ret = FPE_FLTRES;
1019 
1020 	return ret;
1021 }
1022 
1023 static void parse_fpe(struct pt_regs *regs)
1024 {
1025 	int code = 0;
1026 
1027 	flush_fp_to_thread(current);
1028 
1029 	code = __parse_fpscr(current->thread.fp_state.fpscr);
1030 
1031 	_exception(SIGFPE, regs, code, regs->nip);
1032 }
1033 
1034 /*
1035  * Illegal instruction emulation support.  Originally written to
1036  * provide the PVR to user applications using the mfspr rd, PVR.
1037  * Return non-zero if we can't emulate, or -EFAULT if the associated
1038  * memory access caused an access fault.  Return zero on success.
1039  *
1040  * There are a couple of ways to do this: either "decode" the instruction
1041  * or directly match lots of bits.  In this case, matching lots of
1042  * bits is faster and easier.
1043  *
1044  */
1045 static int emulate_string_inst(struct pt_regs *regs, u32 instword)
1046 {
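	/*
	 * lswi/lswx and stswi/stswx move an arbitrary number of bytes
	 * between memory and a run of GPRs starting at rT, packing four
	 * bytes per register (most-significant byte first) and wrapping
	 * from r31 back to r0.
	 */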
1047 	u8 rT = (instword >> 21) & 0x1f;
1048 	u8 rA = (instword >> 16) & 0x1f;
1049 	u8 NB_RB = (instword >> 11) & 0x1f;
1050 	u32 num_bytes;
1051 	unsigned long EA;
1052 	int pos = 0;
1053 
1054 	/* Early out if we are an invalid form of lswx */
1055 	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
1056 		if ((rT == rA) || (rT == NB_RB))
1057 			return -EINVAL;
1058 
1059 	EA = (rA == 0) ? 0 : regs->gpr[rA];
1060 
1061 	switch (instword & PPC_INST_STRING_MASK) {
1062 		case PPC_INST_LSWX:
1063 		case PPC_INST_STSWX:
1064 			EA += NB_RB;
1065 			num_bytes = regs->xer & 0x7f;
1066 			break;
1067 		case PPC_INST_LSWI:
1068 		case PPC_INST_STSWI:
1069 			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
1070 			break;
1071 		default:
1072 			return -EINVAL;
1073 	}
1074 
1075 	while (num_bytes != 0)
1076 	{
1077 		u8 val;
1078 		u32 shift = 8 * (3 - (pos & 0x3));
1079 
1080 		/* if process is 32-bit, clear upper 32 bits of EA */
1081 		if ((regs->msr & MSR_64BIT) == 0)
1082 			EA &= 0xFFFFFFFF;
1083 
1084 		switch ((instword & PPC_INST_STRING_MASK)) {
1085 			case PPC_INST_LSWX:
1086 			case PPC_INST_LSWI:
1087 				if (get_user(val, (u8 __user *)EA))
1088 					return -EFAULT;
1089 				/* first time updating this reg,
1090 				 * zero it out */
1091 				if (pos == 0)
1092 					regs->gpr[rT] = 0;
1093 				regs->gpr[rT] |= val << shift;
1094 				break;
1095 			case PPC_INST_STSWI:
1096 			case PPC_INST_STSWX:
1097 				val = regs->gpr[rT] >> shift;
1098 				if (put_user(val, (u8 __user *)EA))
1099 					return -EFAULT;
1100 				break;
1101 		}
1102 		/* move EA to next address */
1103 		EA += 1;
1104 		num_bytes--;
1105 
1106 		/* manage our position within the register */
1107 		if (++pos == 4) {
1108 			pos = 0;
1109 			if (++rT == 32)
1110 				rT = 0;
1111 		}
1112 	}
1113 
1114 	return 0;
1115 }
1116 
1117 static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
1118 {
1119 	u32 ra, rs;
1120 	unsigned long tmp;
1121 
1122 	ra = (instword >> 16) & 0x1f;
1123 	rs = (instword >> 21) & 0x1f;
1124 
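	/*
	 * Byte-wise population count via the classic SWAR reduction:
	 * pair sums, then nibble sums, then a final fold, so that each
	 * byte of tmp ends up holding the number of set bits in the
	 * corresponding byte of the source register, which is what
	 * popcntb is defined to return.
	 */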
1125 	tmp = regs->gpr[rs];
1126 	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
1127 	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
1128 	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
1129 	regs->gpr[ra] = tmp;
1130 
1131 	return 0;
1132 }
1133 
1134 static int emulate_isel(struct pt_regs *regs, u32 instword)
1135 {
1136 	u8 rT = (instword >> 21) & 0x1f;
1137 	u8 rA = (instword >> 16) & 0x1f;
1138 	u8 rB = (instword >> 11) & 0x1f;
1139 	u8 BC = (instword >> 6) & 0x1f;
1140 	u8 bit;
1141 	unsigned long tmp;
1142 
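	/* isel: rT = CR bit BC set ? rA (or 0 when rA is r0) : rB */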
1143 	tmp = (rA == 0) ? 0 : regs->gpr[rA];
1144 	bit = (regs->ccr >> (31 - BC)) & 0x1;
1145 
1146 	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];
1147 
1148 	return 0;
1149 }
1150 
1151 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1152 static inline bool tm_abort_check(struct pt_regs *regs, int cause)
1153 {
1154         /* If we're emulating a load/store in an active transaction, we cannot
1155          * emulate it as the kernel operates in transaction suspended context.
1156          * We need to abort the transaction.  This creates a persistent TM
1157          * abort so tell the user what caused it with a new code.
1158 	 */
1159 	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
1160 		tm_enable();
1161 		tm_abort(cause);
1162 		return true;
1163 	}
1164 	return false;
1165 }
1166 #else
1167 static inline bool tm_abort_check(struct pt_regs *regs, int reason)
1168 {
1169 	return false;
1170 }
1171 #endif
1172 
1173 static int emulate_instruction(struct pt_regs *regs)
1174 {
1175 	u32 instword;
1176 	u32 rd;
1177 
1178 	if (!user_mode(regs))
1179 		return -EINVAL;
1180 	CHECK_FULL_REGS(regs);
1181 
1182 	if (get_user(instword, (u32 __user *)(regs->nip)))
1183 		return -EFAULT;
1184 
1185 	/* Emulate the mfspr rD, PVR. */
1186 	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
1187 		PPC_WARN_EMULATED(mfpvr, regs);
1188 		rd = (instword >> 21) & 0x1f;
1189 		regs->gpr[rd] = mfspr(SPRN_PVR);
1190 		return 0;
1191 	}
1192 
1193 	/* Emulating the dcba insn is just a no-op.  */
1194 	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
1195 		PPC_WARN_EMULATED(dcba, regs);
1196 		return 0;
1197 	}
1198 
1199 	/* Emulate the mcrxr insn.  */
1200 	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
1201 		int shift = (instword >> 21) & 0x1c;
1202 		unsigned long msk = 0xf0000000UL >> shift;
1203 
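		/*
		 * mcrxr copies XER[0:3] (SO, OV, CA and a reserved bit) into
		 * the CR field named by the instruction, then clears those
		 * bits in the XER.
		 */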
1204 		PPC_WARN_EMULATED(mcrxr, regs);
1205 		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
1206 		regs->xer &= ~0xf0000000UL;
1207 		return 0;
1208 	}
1209 
1210 	/* Emulate load/store string insn. */
1211 	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
1212 		if (tm_abort_check(regs,
1213 				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
1214 			return -EINVAL;
1215 		PPC_WARN_EMULATED(string, regs);
1216 		return emulate_string_inst(regs, instword);
1217 	}
1218 
1219 	/* Emulate the popcntb (Population Count Bytes) instruction. */
1220 	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
1221 		PPC_WARN_EMULATED(popcntb, regs);
1222 		return emulate_popcntb_inst(regs, instword);
1223 	}
1224 
1225 	/* Emulate isel (Integer Select) instruction */
1226 	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
1227 		PPC_WARN_EMULATED(isel, regs);
1228 		return emulate_isel(regs, instword);
1229 	}
1230 
1231 	/* Emulate sync instruction variants */
1232 	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
1233 		PPC_WARN_EMULATED(sync, regs);
1234 		asm volatile("sync");
1235 		return 0;
1236 	}
1237 
1238 #ifdef CONFIG_PPC64
1239 	/* Emulate the mfspr rD, DSCR. */
1240 	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
1241 		PPC_INST_MFSPR_DSCR_USER) ||
1242 	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
1243 		PPC_INST_MFSPR_DSCR)) &&
1244 			cpu_has_feature(CPU_FTR_DSCR)) {
1245 		PPC_WARN_EMULATED(mfdscr, regs);
1246 		rd = (instword >> 21) & 0x1f;
1247 		regs->gpr[rd] = mfspr(SPRN_DSCR);
1248 		return 0;
1249 	}
1250 	/* Emulate the mtspr DSCR, rD. */
1251 	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
1252 		PPC_INST_MTSPR_DSCR_USER) ||
1253 	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
1254 		PPC_INST_MTSPR_DSCR)) &&
1255 			cpu_has_feature(CPU_FTR_DSCR)) {
1256 		PPC_WARN_EMULATED(mtdscr, regs);
1257 		rd = (instword >> 21) & 0x1f;
1258 		current->thread.dscr = regs->gpr[rd];
1259 		current->thread.dscr_inherit = 1;
1260 		mtspr(SPRN_DSCR, current->thread.dscr);
1261 		return 0;
1262 	}
1263 #endif
1264 
1265 	return -EINVAL;
1266 }
1267 
1268 int is_valid_bugaddr(unsigned long addr)
1269 {
1270 	return is_kernel_addr(addr);
1271 }
1272 
1273 #ifdef CONFIG_MATH_EMULATION
1274 static int emulate_math(struct pt_regs *regs)
1275 {
1276 	int ret;
1277 	extern int do_mathemu(struct pt_regs *regs);
1278 
1279 	ret = do_mathemu(regs);
1280 	if (ret >= 0)
1281 		PPC_WARN_EMULATED(math, regs);
1282 
1283 	switch (ret) {
1284 	case 0:
1285 		emulate_single_step(regs);
1286 		return 0;
1287 	case 1: {
1288 			int code = 0;
1289 			code = __parse_fpscr(current->thread.fp_state.fpscr);
1290 			_exception(SIGFPE, regs, code, regs->nip);
1291 			return 0;
1292 		}
1293 	case -EFAULT:
1294 		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1295 		return 0;
1296 	}
1297 
1298 	return -1;
1299 }
1300 #else
1301 static inline int emulate_math(struct pt_regs *regs) { return -1; }
1302 #endif
1303 
1304 void program_check_exception(struct pt_regs *regs)
1305 {
1306 	enum ctx_state prev_state = exception_enter();
1307 	unsigned int reason = get_reason(regs);
1308 
1309 	/* We can now get here via an FP Unavailable exception if the core
1310 	 * has no FPU; in that case the reason flags will be 0 */
1311 
1312 	if (reason & REASON_FP) {
1313 		/* IEEE FP exception */
1314 		parse_fpe(regs);
1315 		goto bail;
1316 	}
1317 	if (reason & REASON_TRAP) {
1318 		unsigned long bugaddr;
1319 		/* Debugger is first in line to stop recursive faults in
1320 		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
1321 		if (debugger_bpt(regs))
1322 			goto bail;
1323 
1324 		if (kprobe_handler(regs))
1325 			goto bail;
1326 
1327 		/* trap exception */
1328 		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
1329 				== NOTIFY_STOP)
1330 			goto bail;
1331 
1332 		bugaddr = regs->nip;
1333 		/*
1334 		 * Fixup bugaddr for BUG_ON() in real mode
1335 		 */
1336 		if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
1337 			bugaddr += PAGE_OFFSET;
1338 
1339 		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
1340 		    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
1341 			regs->nip += 4;
1342 			goto bail;
1343 		}
1344 		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
1345 		goto bail;
1346 	}
1347 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1348 	if (reason & REASON_TM) {
1349 		/* This is a TM "Bad Thing Exception" program check.
1350 		 * This occurs when:
1351 		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
1352 		 *    transition in TM states.
1353 		 * -  A trechkpt is attempted when transactional.
1354 		 * -  A treclaim is attempted when non transactional.
1355 		 * -  A tend is illegally attempted.
1356 		 * -  writing a TM SPR when transactional.
1357 		 *
1358 		 * If usermode caused this, it's done something illegal and
1359 		 * gets a SIGILL slap on the wrist.  We call it an illegal
1360 		 * operand to distinguish from the instruction just being bad
1361 		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
1362 		 * illegal /placement/ of a valid instruction.
1363 		 */
1364 		if (user_mode(regs)) {
1365 			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
1366 			goto bail;
1367 		} else {
1368 			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
1369 			       "at %lx (msr 0x%x)\n", regs->nip, reason);
1370 			die("Unrecoverable exception", regs, SIGABRT);
1371 		}
1372 	}
1373 #endif
1374 
1375 	/*
1376 	 * If we took the program check in the kernel, skip down to sending a
1377 	 * SIGILL. The subsequent cases all relate to emulating instructions
1378 	 * which we should only do for userspace. We also do not want to enable
1379 	 * interrupts for kernel faults because that might lead to further
1380 	 * faults, and lose the context of the original exception.
1381 	 */
1382 	if (!user_mode(regs))
1383 		goto sigill;
1384 
1385 	/* We restore the interrupt state now */
1386 	if (!arch_irq_disabled_regs(regs))
1387 		local_irq_enable();
1388 
1389 	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
1390 	 * but there seems to be a hardware bug on the 405GP (RevD)
1391 	 * that means ESR is sometimes set incorrectly - either to
1392 	 * ESR_DST (!?) or 0.  In the process of chasing this with the
1393 	 * hardware people - not sure if it can happen on any illegal
1394 	 * instruction or only on FP instructions, whether there is a
1395 	 * pattern to occurrences etc. -dgibson 31/Mar/2003
1396 	 */
1397 	if (!emulate_math(regs))
1398 		goto bail;
1399 
1400 	/* Try to emulate it if we should. */
1401 	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
1402 		switch (emulate_instruction(regs)) {
1403 		case 0:
1404 			regs->nip += 4;
1405 			emulate_single_step(regs);
1406 			goto bail;
1407 		case -EFAULT:
1408 			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1409 			goto bail;
1410 		}
1411 	}
1412 
1413 sigill:
1414 	if (reason & REASON_PRIVILEGED)
1415 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1416 	else
1417 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1418 
1419 bail:
1420 	exception_exit(prev_state);
1421 }
1422 NOKPROBE_SYMBOL(program_check_exception);
1423 
1424 /*
1425  * This occurs when running in hypervisor mode on POWER6 or later
1426  * and an illegal instruction is encountered.
1427  */
1428 void emulation_assist_interrupt(struct pt_regs *regs)
1429 {
1430 	regs->msr |= REASON_ILLEGAL;
1431 	program_check_exception(regs);
1432 }
1433 NOKPROBE_SYMBOL(emulation_assist_interrupt);
1434 
1435 void alignment_exception(struct pt_regs *regs)
1436 {
1437 	enum ctx_state prev_state = exception_enter();
1438 	int sig, code, fixed = 0;
1439 
1440 	/* We restore the interrupt state now */
1441 	if (!arch_irq_disabled_regs(regs))
1442 		local_irq_enable();
1443 
1444 	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
1445 		goto bail;
1446 
1447 	/* we don't implement logging of alignment exceptions */
1448 	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
1449 		fixed = fix_alignment(regs);
1450 
1451 	if (fixed == 1) {
1452 		regs->nip += 4;	/* skip over emulated instruction */
1453 		emulate_single_step(regs);
1454 		goto bail;
1455 	}
1456 
1457 	/* Operand address was bad */
1458 	if (fixed == -EFAULT) {
1459 		sig = SIGSEGV;
1460 		code = SEGV_ACCERR;
1461 	} else {
1462 		sig = SIGBUS;
1463 		code = BUS_ADRALN;
1464 	}
1465 	if (user_mode(regs))
1466 		_exception(sig, regs, code, regs->dar);
1467 	else
1468 		bad_page_fault(regs, regs->dar, sig);
1469 
1470 bail:
1471 	exception_exit(prev_state);
1472 }
1473 
1474 void slb_miss_bad_addr(struct pt_regs *regs)
1475 {
1476 	enum ctx_state prev_state = exception_enter();
1477 
1478 	if (user_mode(regs))
1479 		_exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
1480 	else
1481 		bad_page_fault(regs, regs->dar, SIGSEGV);
1482 
1483 	exception_exit(prev_state);
1484 }
1485 
1486 void StackOverflow(struct pt_regs *regs)
1487 {
1488 	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
1489 	       current, regs->gpr[1]);
1490 	debugger(regs);
1491 	show_regs(regs);
1492 	panic("kernel stack overflow");
1493 }
1494 
1495 void nonrecoverable_exception(struct pt_regs *regs)
1496 {
1497 	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
1498 	       regs->nip, regs->msr);
1499 	debugger(regs);
1500 	die("nonrecoverable exception", regs, SIGKILL);
1501 }
1502 
1503 void kernel_fp_unavailable_exception(struct pt_regs *regs)
1504 {
1505 	enum ctx_state prev_state = exception_enter();
1506 
1507 	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
1508 			  "%lx at %lx\n", regs->trap, regs->nip);
1509 	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
1510 
1511 	exception_exit(prev_state);
1512 }
1513 
1514 void altivec_unavailable_exception(struct pt_regs *regs)
1515 {
1516 	enum ctx_state prev_state = exception_enter();
1517 
1518 	if (user_mode(regs)) {
1519 		/* A user program has executed an altivec instruction,
1520 		   but this kernel doesn't support altivec. */
1521 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1522 		goto bail;
1523 	}
1524 
1525 	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
1526 			"%lx at %lx\n", regs->trap, regs->nip);
1527 	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
1528 
1529 bail:
1530 	exception_exit(prev_state);
1531 }
1532 
1533 void vsx_unavailable_exception(struct pt_regs *regs)
1534 {
1535 	if (user_mode(regs)) {
1536 		/* A user program has executed a vsx instruction,
1537 		   but this kernel doesn't support vsx. */
1538 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1539 		return;
1540 	}
1541 
1542 	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
1543 			"%lx at %lx\n", regs->trap, regs->nip);
1544 	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
1545 }
1546 
1547 #ifdef CONFIG_PPC64
1548 static void tm_unavailable(struct pt_regs *regs)
1549 {
1550 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1551 	if (user_mode(regs)) {
1552 		current->thread.load_tm++;
1553 		regs->msr |= MSR_TM;
1554 		tm_enable();
1555 		tm_restore_sprs(&current->thread);
1556 		return;
1557 	}
1558 #endif
1559 	pr_emerg("Unrecoverable TM Unavailable Exception "
1560 			"%lx at %lx\n", regs->trap, regs->nip);
1561 	die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
1562 }
1563 
1564 void facility_unavailable_exception(struct pt_regs *regs)
1565 {
1566 	static char *facility_strings[] = {
1567 		[FSCR_FP_LG] = "FPU",
1568 		[FSCR_VECVSX_LG] = "VMX/VSX",
1569 		[FSCR_DSCR_LG] = "DSCR",
1570 		[FSCR_PM_LG] = "PMU SPRs",
1571 		[FSCR_BHRB_LG] = "BHRB",
1572 		[FSCR_TM_LG] = "TM",
1573 		[FSCR_EBB_LG] = "EBB",
1574 		[FSCR_TAR_LG] = "TAR",
1575 		[FSCR_MSGP_LG] = "MSGP",
1576 		[FSCR_SCV_LG] = "SCV",
1577 	};
1578 	char *facility = "unknown";
1579 	u64 value;
1580 	u32 instword, rd;
1581 	u8 status;
1582 	bool hv;
1583 
1584 	hv = (TRAP(regs) == 0xf80);
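	/*
	 * Trap vector 0xf80 is the Hypervisor Facility Unavailable
	 * interrupt (controlled by the HFSCR); 0xf60 is the ordinary
	 * Facility Unavailable interrupt (controlled by the FSCR).
	 */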
1585 	if (hv)
1586 		value = mfspr(SPRN_HFSCR);
1587 	else
1588 		value = mfspr(SPRN_FSCR);
1589 
1590 	status = value >> 56;
1591 	if (status == FSCR_DSCR_LG) {
1592 		/*
1593 		 * User is accessing the DSCR register using the problem
1594 		 * state only SPR number (0x03) either through a mfspr or
1595 		 * a mtspr instruction. If it is a write attempt through
1596 		 * a mtspr, then we set the inherit bit. This also allows
1597 		 * the user to write or read the register directly in the
1598 		 * future by setting via the FSCR DSCR bit. But in case it
1599 		 * is a read DSCR attempt through a mfspr instruction, we
1600 		 * just emulate the instruction instead. This code path will
1601 		 * always emulate all the mfspr instructions until the user
1602 		 * has attempted at least one mtspr instruction. This way it
1603 		 * preserves the same behaviour when the user is accessing
1604 		 * the DSCR through privilege level only SPR number (0x11)
1605 		 * which is emulated through illegal instruction exception.
1606 		 * We always leave HFSCR DSCR set.
1607 		 */
1608 		if (get_user(instword, (u32 __user *)(regs->nip))) {
1609 			pr_err("Failed to fetch the user instruction\n");
1610 			return;
1611 		}
1612 
1613 		/* Write into DSCR (mtspr 0x03, RS) */
1614 		if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
1615 				== PPC_INST_MTSPR_DSCR_USER) {
1616 			rd = (instword >> 21) & 0x1f;
1617 			current->thread.dscr = regs->gpr[rd];
1618 			current->thread.dscr_inherit = 1;
1619 			current->thread.fscr |= FSCR_DSCR;
1620 			mtspr(SPRN_FSCR, current->thread.fscr);
1621 		}
1622 
1623 		/* Read from DSCR (mfspr RT, 0x03) */
1624 		if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
1625 				== PPC_INST_MFSPR_DSCR_USER) {
1626 			if (emulate_instruction(regs)) {
1627 				pr_err("DSCR based mfspr emulation failed\n");
1628 				return;
1629 			}
1630 			regs->nip += 4;
1631 			emulate_single_step(regs);
1632 		}
1633 		return;
1634 	}
1635 
1636 	if (status == FSCR_TM_LG) {
1637 		/*
1638 		 * If we're here then the hardware is TM aware because it
1639 		 * generated an exception with FSCR_TM set.
1640 		 *
1641 		 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
1642 		 * told us not to do TM, or the kernel is not built with TM
1643 		 * support.
1644 		 *
1645 		 * If both of those things are true, then userspace can spam the
1646 		 * console by triggering the printk() below just by continually
1647 		 * doing tbegin (or any TM instruction). So in that case just
1648 		 * send the process a SIGILL immediately.
1649 		 */
1650 		if (!cpu_has_feature(CPU_FTR_TM))
1651 			goto out;
1652 
1653 		tm_unavailable(regs);
1654 		return;
1655 	}
1656 
1657 	if ((hv || status >= 2) &&
1658 	    (status < ARRAY_SIZE(facility_strings)) &&
1659 	    facility_strings[status])
1660 		facility = facility_strings[status];
1661 
1662 	/* We restore the interrupt state now */
1663 	if (!arch_irq_disabled_regs(regs))
1664 		local_irq_enable();
1665 
1666 	pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
1667 		hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);
1668 
1669 out:
1670 	if (user_mode(regs)) {
1671 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1672 		return;
1673 	}
1674 
1675 	die("Unexpected facility unavailable exception", regs, SIGABRT);
1676 }
1677 #endif
1678 
1679 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1680 
1681 void fp_unavailable_tm(struct pt_regs *regs)
1682 {
1683 	/* Note:  This does not handle any kind of FP laziness. */
1684 
1685 	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
1686 		 regs->nip, regs->msr);
1687 
1688         /* We can only have got here if the task started using FP after
1689          * beginning the transaction.  So, the transactional regs are just a
1690          * copy of the checkpointed ones.  But, we still need to recheckpoint
1691          * as we're enabling FP for the process; it will return, abort the
1692          * transaction, and probably retry but now with FP enabled.  So the
1693          * checkpointed FP registers need to be loaded.
1694 	 */
1695 	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1696 	/* Reclaim didn't save out any FPRs to transact_fprs. */
1697 
1698 	/* Enable FP for the task: */
1699 	current->thread.load_fp = 1;
1700 
1701 	/* This loads and recheckpoints the FP registers from
1702 	 * thread.fpr[].  They will remain in registers after the
1703 	 * checkpoint so we don't need to reload them after.
1704 	 * If VMX is in use, the VRs now hold checkpointed values,
1705 	 * so we don't want to load the VRs from the thread_struct.
1706 	 */
1707 	tm_recheckpoint(&current->thread);
1708 }
1709 
1710 void altivec_unavailable_tm(struct pt_regs *regs)
1711 {
1712 	/* See the comments in fp_unavailable_tm().  This function operates
1713 	 * the same way.
1714 	 */
1715 
1716 	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
1717 		 "MSR=%lx\n",
1718 		 regs->nip, regs->msr);
1719 	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1720 	current->thread.load_vec = 1;
1721 	tm_recheckpoint(&current->thread);
1722 	current->thread.used_vr = 1;
1723 }
1724 
1725 void vsx_unavailable_tm(struct pt_regs *regs)
1726 {
1727 	/* See the comments in fp_unavailable_tm().  This works similarly,
1728 	 * though we're loading both FP and VEC registers in here.
1729 	 *
1730 	 * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
1731 	 * regs.  Either way, set MSR_VSX.
1732 	 */
1733 
1734 	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
1735 		 "MSR=%lx\n",
1736 		 regs->nip, regs->msr);
1737 
1738 	current->thread.used_vsr = 1;
1739 
1740 	/* This reclaims FP and/or VR regs if they're already enabled */
1741 	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1742 
1743 	current->thread.load_vec = 1;
1744 	current->thread.load_fp = 1;
1745 
1746 	tm_recheckpoint(&current->thread);
1747 }
1748 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1749 
1750 void performance_monitor_exception(struct pt_regs *regs)
1751 {
1752 	__this_cpu_inc(irq_stat.pmu_irqs);
1753 
1754 	perf_irq(regs);
1755 }
1756 
1757 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1758 static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
1759 {
1760 	int changed = 0;
1761 	/*
1762 	 * Determine the cause of the debug event, clear the
1763 	 * event flags and send a trap to the handler. Torez
1764 	 */
1765 	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
1766 		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
1767 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1768 		current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
1769 #endif
1770 		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
1771 			     5);
1772 		changed |= 0x01;
1773 	}  else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
1774 		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
1775 		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
1776 			     6);
1777 		changed |= 0x01;
1778 	}  else if (debug_status & DBSR_IAC1) {
1779 		current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
1780 		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
1781 		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
1782 			     1);
1783 		changed |= 0x01;
1784 	}  else if (debug_status & DBSR_IAC2) {
1785 		current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
1786 		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
1787 			     2);
1788 		changed |= 0x01;
1789 	}  else if (debug_status & DBSR_IAC3) {
1790 		current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
1791 		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
1792 		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
1793 			     3);
1794 		changed |= 0x01;
1795 	}  else if (debug_status & DBSR_IAC4) {
1796 		current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
1797 		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
1798 			     4);
1799 		changed |= 0x01;
1800 	}
1801 	/*
1802 	 * At the point this routine was called, the MSR(DE) was turned off.
1803 	 * Check all other debug flags and see if that bit needs to be turned
1804 	 * back on or not.
1805 	 */
1806 	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
1807 			       current->thread.debug.dbcr1))
1808 		regs->msr |= MSR_DE;
1809 	else
1810 		/* Make sure the IDM flag is off */
1811 		current->thread.debug.dbcr0 &= ~DBCR0_IDM;
1812 
1813 	if (changed & 0x01)
1814 		mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
1815 }
1816 
1817 void DebugException(struct pt_regs *regs, unsigned long debug_status)
1818 {
1819 	current->thread.debug.dbsr = debug_status;
1820 
1821 	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
1822 	 * on server, it stops on the target of the branch. In order to simulate
1823 	 * the server behaviour, we thus restart right away with a single step
1824 	 * instead of stopping here when hitting a BT
1825 	 */
1826 	if (debug_status & DBSR_BT) {
1827 		regs->msr &= ~MSR_DE;
1828 
1829 		/* Disable BT */
1830 		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
1831 		/* Clear the BT event */
1832 		mtspr(SPRN_DBSR, DBSR_BT);
1833 
1834 		/* Do the single step trick only when coming from userspace */
1835 		if (user_mode(regs)) {
1836 			current->thread.debug.dbcr0 &= ~DBCR0_BT;
1837 			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
1838 			regs->msr |= MSR_DE;
1839 			return;
1840 		}
1841 
1842 		if (kprobe_post_handler(regs))
1843 			return;
1844 
1845 		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
1846 			       5, SIGTRAP) == NOTIFY_STOP) {
1847 			return;
1848 		}
1849 		if (debugger_sstep(regs))
1850 			return;
1851 	} else if (debug_status & DBSR_IC) { 	/* Instruction complete */
1852 		regs->msr &= ~MSR_DE;
1853 
1854 		/* Disable instruction completion */
1855 		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
1856 		/* Clear the instruction completion event */
1857 		mtspr(SPRN_DBSR, DBSR_IC);
1858 
1859 		if (kprobe_post_handler(regs))
1860 			return;
1861 
1862 		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
1863 			       5, SIGTRAP) == NOTIFY_STOP) {
1864 			return;
1865 		}
1866 
1867 		if (debugger_sstep(regs))
1868 			return;
1869 
1870 		if (user_mode(regs)) {
1871 			current->thread.debug.dbcr0 &= ~DBCR0_IC;
1872 			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
1873 					       current->thread.debug.dbcr1))
1874 				regs->msr |= MSR_DE;
1875 			else
1876 				/* Make sure the IDM bit is off */
1877 				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
1878 		}
1879 
1880 		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
1881 	} else
1882 		handle_debug(regs, debug_status);
1883 }
1884 NOKPROBE_SYMBOL(DebugException);
1885 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1886 
1887 #if !defined(CONFIG_TAU_INT)
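/*
 * Fallback for the Thermal Assist Unit interrupt when the full TAU driver
 * (CONFIG_TAU_INT) is not built in: just log that the trap happened.
 */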
1888 void TAUException(struct pt_regs *regs)
1889 {
1890 	printk(KERN_WARNING "TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
1891 	       regs->nip, regs->msr, regs->trap, print_tainted());
1892 }
1893 #endif /* CONFIG_TAU_INT */
1894 
1895 #ifdef CONFIG_ALTIVEC
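/*
 * The VMX/AltiVec assist exception is raised when the vector unit cannot
 * complete an instruction itself (typically denormal handling in Java
 * mode).  We try to emulate the instruction; if it is one we do not
 * recognize, we set the non-Java (NJ) bit in the thread's VSCR as a
 * stopgap so the hardware stops asking for assistance.
 */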
1896 void altivec_assist_exception(struct pt_regs *regs)
1897 {
1898 	int err;
1899 
1900 	if (!user_mode(regs)) {
1901 		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
1902 		       " at %lx\n", regs->nip);
1903 		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
1904 	}
1905 
1906 	flush_altivec_to_thread(current);
1907 
1908 	PPC_WARN_EMULATED(altivec, regs);
1909 	err = emulate_altivec(regs);
1910 	if (err == 0) {
1911 		regs->nip += 4;		/* skip emulated instruction */
1912 		emulate_single_step(regs);
1913 		return;
1914 	}
1915 
1916 	if (err == -EFAULT) {
1917 		/* got an error reading the instruction */
1918 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1919 	} else {
1920 		/* didn't recognize the instruction */
1921 		/* XXX quick hack for now: set the non-Java bit in the VSCR */
1922 		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
1923 				   "in %s at %lx\n", current->comm, regs->nip);
1924 		current->thread.vr_state.vscr.u[3] |= 0x10000;
1925 	}
1926 }
1927 #endif /* CONFIG_ALTIVEC */
1928 
1929 #ifdef CONFIG_FSL_BOOKE
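/*
 * e500 cores raise this exception when a cache-locking instruction
 * (dcbtls and friends) is used without the required privilege; ESR_DLK
 * and ESR_ILK distinguish data- and instruction-cache locking attempts.
 */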
1930 void CacheLockingException(struct pt_regs *regs, unsigned long address,
1931 			   unsigned long error_code)
1932 {
1933 	/* We treat cache-locking instructions from userspace
1934 	 * as privileged operations; in the future we could try
1935 	 * to do something smarter.
1936 	 */
1937 	if (error_code & (ESR_DLK | ESR_ILK))
1938 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1940 }
1941 #endif /* CONFIG_FSL_BOOKE */
1942 
1943 #ifdef CONFIG_SPE
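/*
 * SPE floating-point exception: inspect SPEFSCR to translate the pending
 * exception into a POSIX FPE_* code, honouring the exception mask the task
 * set with prctl(PR_SET_FPEXC), then hand the instruction to the SPE math
 * emulator.  A return of 0 means it was emulated, -EFAULT means the
 * instruction could not even be read, and anything else becomes a SIGFPE.
 */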
1944 void SPEFloatingPointException(struct pt_regs *regs)
1945 {
1946 	extern int do_spe_mathemu(struct pt_regs *regs);
1947 	unsigned long spefscr;
1948 	int fpexc_mode;
1949 	int code = 0;
1950 	int err;
1951 
1952 	flush_spe_to_thread(current);
1953 
1954 	spefscr = current->thread.spefscr;
1955 	fpexc_mode = current->thread.fpexc_mode;
1956 
1957 	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF))
1958 		code = FPE_FLTOVF;
1960 	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND))
1961 		code = FPE_FLTUND;
1963 	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
1964 		code = FPE_FLTDIV;
1965 	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV))
1966 		code = FPE_FLTINV;
1968 	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
1969 		code = FPE_FLTRES;
1970 
1971 	err = do_spe_mathemu(regs);
1972 	if (err == 0) {
1973 		regs->nip += 4;		/* skip emulated instruction */
1974 		emulate_single_step(regs);
1975 		return;
1976 	}
1977 
1978 	if (err == -EFAULT) {
1979 		/* got an error reading the instruction */
1980 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1981 	} else if (err == -EINVAL) {
1982 		/* didn't recognize the instruction */
1983 		printk(KERN_ERR "unrecognized SPE instruction "
1984 		       "in %s at %lx\n", current->comm, regs->nip);
1985 	} else {
1986 		_exception(SIGFPE, regs, code, regs->nip);
1987 	}
1990 }
1991 
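/*
 * SPE round exception: delivered after the instruction has completed with
 * an inexact result, so NIP is wound back by one instruction before the
 * software rounding helper is invoked.  On success the instruction is
 * stepped over again, mirroring SPEFloatingPointException() above.
 */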
1992 void SPEFloatingPointRoundException(struct pt_regs *regs)
1993 {
1994 	extern int speround_handler(struct pt_regs *regs);
1995 	int err;
1996 
1997 	preempt_disable();
1998 	if (regs->msr & MSR_SPE)
1999 		giveup_spe(current);
2000 	preempt_enable();
2001 
2002 	regs->nip -= 4;
2003 	err = speround_handler(regs);
2004 	if (err == 0) {
2005 		regs->nip += 4;		/* skip emulated instruction */
2006 		emulate_single_step(regs);
2007 		return;
2008 	}
2009 
2010 	if (err == -EFAULT) {
2011 		/* got an error reading the instruction */
2012 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
2013 	} else if (err == -EINVAL) {
2014 		/* didn't recognize the instruction */
2015 		printk(KERN_ERR "unrecognized SPE instruction "
2016 		       "in %s at %lx\n", current->comm, regs->nip);
2017 	} else {
2018 		_exception(SIGFPE, regs, 0, regs->nip);
2020 	}
2021 }
2022 #endif
2023 
2024 /*
2025  * We enter here if we get an unrecoverable exception, that is, one
2026  * that happened at a point where the RI (recoverable interrupt) bit
2027  * in the MSR was 0.  This indicates that SRR0/SRR1 still held live state,
2028  * which has now been clobbered, so the interrupted context cannot be resumed.
2029  */
2030 void unrecoverable_exception(struct pt_regs *regs)
2031 {
2032 	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
2033 	       regs->trap, regs->nip);
2034 	die("Unrecoverable exception", regs, SIGABRT);
2035 }
2036 NOKPROBE_SYMBOL(unrecoverable_exception);
2037 
2038 #if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
2039 /*
2040  * Default handler for a Watchdog exception: it just masks further
2041  * watchdog interrupts.  Boards should override this weak function.
2042  */
2043 void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
2044 {
2045 	/* Generic WatchdogHandler, implement your own */
2046 	mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
2048 }
2049 
2050 void WatchdogException(struct pt_regs *regs)
2051 {
2052 	printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
2053 	WatchdogHandler(regs);
2054 }
2055 #endif
2056 
2057 /*
2058  * We enter here if we discover during exception entry that we are
2059  * running in supervisor mode with a userspace value in the stack pointer.
2060  */
2061 void kernel_bad_stack(struct pt_regs *regs)
2062 {
2063 	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
2064 	       regs->gpr[1], regs->nip);
2065 	die("Bad kernel stack pointer", regs, SIGABRT);
2066 }
2067 NOKPROBE_SYMBOL(kernel_bad_stack);
2068 
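/*
 * Nothing to set up at runtime here: the exception vectors are laid down
 * by the early assembly and platform setup code, so trap_init() is
 * intentionally empty on powerpc.
 */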
2069 void __init trap_init(void)
2070 {
2071 }
2072 
2074 #ifdef CONFIG_PPC_EMULATED_STATS
2075 
2076 #define WARN_EMULATED_SETUP(type)	.type = { .name = #type }
2077 
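/*
 * Each entry below is named after its struct ppc_emulated member, e.g.
 * WARN_EMULATED_SETUP(dcbz) expands to '.dcbz = { .name = "dcbz" }', so
 * the debugfs files created later pick up the same names automatically.
 */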
2078 struct ppc_emulated ppc_emulated = {
2079 #ifdef CONFIG_ALTIVEC
2080 	WARN_EMULATED_SETUP(altivec),
2081 #endif
2082 	WARN_EMULATED_SETUP(dcba),
2083 	WARN_EMULATED_SETUP(dcbz),
2084 	WARN_EMULATED_SETUP(fp_pair),
2085 	WARN_EMULATED_SETUP(isel),
2086 	WARN_EMULATED_SETUP(mcrxr),
2087 	WARN_EMULATED_SETUP(mfpvr),
2088 	WARN_EMULATED_SETUP(multiple),
2089 	WARN_EMULATED_SETUP(popcntb),
2090 	WARN_EMULATED_SETUP(spe),
2091 	WARN_EMULATED_SETUP(string),
2092 	WARN_EMULATED_SETUP(sync),
2093 	WARN_EMULATED_SETUP(unaligned),
2094 #ifdef CONFIG_MATH_EMULATION
2095 	WARN_EMULATED_SETUP(math),
2096 #endif
2097 #ifdef CONFIG_VSX
2098 	WARN_EMULATED_SETUP(vsx),
2099 #endif
2100 #ifdef CONFIG_PPC64
2101 	WARN_EMULATED_SETUP(mfdscr),
2102 	WARN_EMULATED_SETUP(mtdscr),
2103 	WARN_EMULATED_SETUP(lq_stq),
2104 	WARN_EMULATED_SETUP(lxvw4x),
2105 	WARN_EMULATED_SETUP(lxvh8x),
2106 	WARN_EMULATED_SETUP(lxvd2x),
2107 	WARN_EMULATED_SETUP(lxvb16x),
2108 #endif
2109 };
2110 
2111 u32 ppc_warn_emulated;
2112 
2113 void ppc_warn_emulated_print(const char *type)
2114 {
2115 	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
2116 			    type);
2117 }
2118 
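/*
 * Expose the emulation counters under debugfs: a directory
 * "emulated_instructions" containing a global "do_warn" switch plus one
 * u32 file per emulated instruction type defined in ppc_emulated above.
 */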
2119 static int __init ppc_warn_emulated_init(void)
2120 {
2121 	struct dentry *dir, *d;
2122 	unsigned int i;
2123 	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
2124 
2125 	if (!powerpc_debugfs_root)
2126 		return -ENODEV;
2127 
2128 	dir = debugfs_create_dir("emulated_instructions",
2129 				 powerpc_debugfs_root);
2130 	if (!dir)
2131 		return -ENOMEM;
2132 
2133 	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
2134 			       &ppc_warn_emulated);
2135 	if (!d)
2136 		goto fail;
2137 
2138 	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
2139 		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
2140 				       (u32 *)&entries[i].val.counter);
2141 		if (!d)
2142 			goto fail;
2143 	}
2144 
2145 	return 0;
2146 
2147 fail:
2148 	debugfs_remove_recursive(dir);
2149 	return -ENOMEM;
2150 }
2151 
2152 device_initcall(ppc_warn_emulated_init);
2153 
2154 #endif /* CONFIG_PPC_EMULATED_STATS */
2155