xref: /freebsd/sys/powerpc/aim/trap_subr64.S (revision eb69d1f144a6fcc765d1b9d44a5ae8082353e70b)
1/* $FreeBSD$ */
2/* $NetBSD: trap_subr.S,v 1.20 2002/04/22 23:20:08 kleink Exp $	*/
3
4/*-
5 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
6 * Copyright (C) 1995, 1996 TooLs GmbH.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by TooLs GmbH.
20 * 4. The name of TooLs GmbH may not be used to endorse or promote products
21 *    derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/*
36 * NOTICE: This is not a standalone file.  to use it, #include it in
37 * your port's locore.S, like so:
38 *
39 *	#include <powerpc/aim/trap_subr.S>
40 */
41
/* Locate the per-CPU data structure */
#define GET_CPUINFO(r)  \
        mfsprg0  r
/* Load the kernel TOC pointer from the fixed-address word at TRAP_TOCBASE */
#define GET_TOCBASE(r)  \
	li	r,TRAP_TOCBASE;	/* Magic address for TOC */ \
	ld	r,0(r)
48
49/*
50 * Restore SRs for a pmap
51 *
52 * Requires that r28-r31 be scratch, with r28 initialized to the SLB cache
53 */
54
55/*
56 * User SRs are loaded through a pointer to the current pmap.
57 */
58restore_usersrs:
59	GET_CPUINFO(%r28)
60	ld	%r28,PC_USERSLB(%r28)
61	cmpdi	%r28, 0			/* If user SLB pointer NULL, exit */
62	beqlr
63
64	li	%r29, 0			/* Set the counter to zero */
65
66	slbia
67	slbmfee	%r31,%r29
68	clrrdi	%r31,%r31,28
69	slbie	%r31
701:	ld	%r31, 0(%r28)		/* Load SLB entry pointer */
71	cmpdi	%r31, 0			/* If NULL, stop */
72	beqlr
73
74	ld	%r30, 0(%r31)		/* Load SLBV */
75	ld	%r31, 8(%r31)		/* Load SLBE */
76	or	%r31, %r31, %r29	/*  Set SLBE slot */
77	slbmte	%r30, %r31		/* Install SLB entry */
78
79	addi	%r28, %r28, 8		/* Advance pointer */
80	addi	%r29, %r29, 1
81	b	1b			/* Repeat */
82
83/*
84 * Kernel SRs are loaded directly from the PCPU fields
85 */
86restore_kernsrs:
87	GET_CPUINFO(%r28)
88	addi	%r28,%r28,PC_KERNSLB
89	ld	%r29,16(%r28)		/* One past USER_SLB_SLOT */
90	cmpdi	%r28,0
91	beqlr				/* If first kernel entry is invalid,
92					 * SLBs not in use, so exit early */
93
94	/* Otherwise, set up SLBs */
95	li	%r29, 0			/* Set the counter to zero */
96
97	slbia
98	slbmfee	%r31,%r29
99	clrrdi	%r31,%r31,28
100	slbie	%r31
1011:	cmpdi	%r29, USER_SLB_SLOT	/* Skip the user slot */
102	beq-	2f
103
104	ld	%r31, 8(%r28)		/* Load SLBE */
105	cmpdi	%r31, 0			/* If SLBE is not valid, stop */
106	beqlr
107	ld	%r30, 0(%r28)		/* Load SLBV  */
108	slbmte	%r30, %r31		/* Install SLB entry */
109
1102:	addi	%r28, %r28, 16		/* Advance pointer */
111	addi	%r29, %r29, 1
112	cmpdi	%r29, 64		/* Repeat if we are not at the end */
113	blt	1b
114	blr
115
116/*
117 * FRAME_SETUP assumes:
118 *	SPRG1		SP (1)
119 * 	SPRG3		trap type
120 *	savearea	r27-r31,DAR,DSISR   (DAR & DSISR only for DSI traps)
121 *	r28		LR
122 *	r29		CR
123 *	r30		scratch
124 *	r31		scratch
125 *	r1		kernel stack
126 *	SRR0/1		as at start of trap
127 *
128 * NOTE: SPRG1 is never used while the MMU is on, making it safe to reuse
129 * in any real-mode fault handler, including those handling double faults.
130 */
131#define	FRAME_SETUP(savearea)						\
132/* Have to enable translation to allow access of kernel stack: */	\
133	GET_CPUINFO(%r31);						\
134	mfsrr0	%r30;							\
135	std	%r30,(savearea+CPUSAVE_SRR0)(%r31);	/* save SRR0 */	\
136	mfsrr1	%r30;							\
137	std	%r30,(savearea+CPUSAVE_SRR1)(%r31);	/* save SRR1 */	\
138	mfsprg1	%r31;			/* get saved SP (clears SPRG1) */ \
139	mfmsr	%r30;							\
140	ori	%r30,%r30,(PSL_DR|PSL_IR|PSL_RI)@l; /* relocation on */	\
141	mtmsr	%r30;			/* stack can now be accessed */	\
142	isync;								\
143	stdu	%r31,-(FRAMELEN+288)(%r1); /* save it in the callframe */ \
144	std	%r0, FRAME_0+48(%r1);	/* save r0 in the trapframe */	\
145	std	%r31,FRAME_1+48(%r1);	/* save SP   "      "       */	\
146	std	%r2, FRAME_2+48(%r1);	/* save r2   "      "       */	\
147	std	%r28,FRAME_LR+48(%r1);	/* save LR   "      "       */	\
148	std	%r29,FRAME_CR+48(%r1);	/* save CR   "      "       */	\
149	GET_CPUINFO(%r2);						\
150	ld	%r27,(savearea+CPUSAVE_R27)(%r2); /* get saved r27 */	\
151	ld	%r28,(savearea+CPUSAVE_R28)(%r2); /* get saved r28 */	\
152	ld	%r29,(savearea+CPUSAVE_R29)(%r2); /* get saved r29 */	\
153	ld	%r30,(savearea+CPUSAVE_R30)(%r2); /* get saved r30 */	\
154	ld	%r31,(savearea+CPUSAVE_R31)(%r2); /* get saved r31 */	\
155	std	%r3,  FRAME_3+48(%r1);	/* save r3-r31 */		\
156	std	%r4,  FRAME_4+48(%r1);					\
157	std	%r5,  FRAME_5+48(%r1);					\
158	std	%r6,  FRAME_6+48(%r1);					\
159	std	%r7,  FRAME_7+48(%r1);					\
160	std	%r8,  FRAME_8+48(%r1);					\
161	std	%r9,  FRAME_9+48(%r1);					\
162	std	%r10, FRAME_10+48(%r1);					\
163	std	%r11, FRAME_11+48(%r1);					\
164	std	%r12, FRAME_12+48(%r1);					\
165	std	%r13, FRAME_13+48(%r1);					\
166	std	%r14, FRAME_14+48(%r1);					\
167	std	%r15, FRAME_15+48(%r1);					\
168	std	%r16, FRAME_16+48(%r1);					\
169	std	%r17, FRAME_17+48(%r1);					\
170	std	%r18, FRAME_18+48(%r1);					\
171	std	%r19, FRAME_19+48(%r1);					\
172	std	%r20, FRAME_20+48(%r1);					\
173	std	%r21, FRAME_21+48(%r1);					\
174	std	%r22, FRAME_22+48(%r1);					\
175	std	%r23, FRAME_23+48(%r1);					\
176	std	%r24, FRAME_24+48(%r1);					\
177	std	%r25, FRAME_25+48(%r1);					\
178	std	%r26, FRAME_26+48(%r1);					\
179	std	%r27, FRAME_27+48(%r1);					\
180	std	%r28, FRAME_28+48(%r1);					\
181	std	%r29, FRAME_29+48(%r1);					\
182	std	%r30, FRAME_30+48(%r1);					\
183	std	%r31, FRAME_31+48(%r1);					\
184	ld	%r28,(savearea+CPUSAVE_AIM_DAR)(%r2);  /* saved DAR */	\
185	ld	%r29,(savearea+CPUSAVE_AIM_DSISR)(%r2);/* saved DSISR */\
186	ld	%r30,(savearea+CPUSAVE_SRR0)(%r2); /* saved SRR0 */	\
187	ld	%r31,(savearea+CPUSAVE_SRR1)(%r2); /* saved SRR1 */	\
188	mfxer	%r3;							\
189	mfctr	%r4;							\
190	mfsprg3	%r5;							\
191	std	%r3, FRAME_XER+48(1);	/* save xer/ctr/exc */		\
192	std	%r4, FRAME_CTR+48(1);					\
193	std	%r5, FRAME_EXC+48(1);					\
194	std	%r28,FRAME_AIM_DAR+48(1);				\
195	std	%r29,FRAME_AIM_DSISR+48(1); /* save dsisr/srr0/srr1 */	\
196	std	%r30,FRAME_SRR0+48(1);					\
197	std	%r31,FRAME_SRR1+48(1);					\
198	ld	%r13,PC_CURTHREAD(%r2)	/* set kernel curthread */
199
/*
 * FRAME_LEAVE undoes FRAME_SETUP: it reloads all registers from the
 * trapframe at 48(%r1) and prepares for rfid.  SRR0/SRR1 are staged
 * through the per-CPU savearea because they are reloaded after
 * translation has been turned off.  If returning to user mode
 * (PSL_PR set in saved SRR1), the user SLB entries are reinstalled
 * via restore_usersrs.  Uses SPRG2 (CR) and SPRG3 (r3) as scratch.
 */
#define	FRAME_LEAVE(savearea)						\
/* Disable exceptions: */						\
	mfmsr	%r2;							\
	andi.	%r2,%r2,~PSL_EE@l;					\
	mtmsr	%r2;							\
	isync;								\
/* Now restore regs: */							\
	ld	%r2,FRAME_SRR0+48(%r1);					\
	ld	%r3,FRAME_SRR1+48(%r1);					\
	ld	%r4,FRAME_CTR+48(%r1);					\
	ld	%r5,FRAME_XER+48(%r1);					\
	ld	%r6,FRAME_LR+48(%r1);					\
	GET_CPUINFO(%r7);						\
	std	%r2,(savearea+CPUSAVE_SRR0)(%r7); /* save SRR0 */	\
	std	%r3,(savearea+CPUSAVE_SRR1)(%r7); /* save SRR1 */	\
	ld	%r7,FRAME_CR+48(%r1);					\
	mtctr	%r4;							\
	mtxer	%r5;							\
	mtlr	%r6;							\
	mtsprg2	%r7;			/* save cr */			\
	ld	%r31,FRAME_31+48(%r1);   /* restore r0-31 */		\
	ld	%r30,FRAME_30+48(%r1);					\
	ld	%r29,FRAME_29+48(%r1);					\
	ld	%r28,FRAME_28+48(%r1);					\
	ld	%r27,FRAME_27+48(%r1);					\
	ld	%r26,FRAME_26+48(%r1);					\
	ld	%r25,FRAME_25+48(%r1);					\
	ld	%r24,FRAME_24+48(%r1);					\
	ld	%r23,FRAME_23+48(%r1);					\
	ld	%r22,FRAME_22+48(%r1);					\
	ld	%r21,FRAME_21+48(%r1);					\
	ld	%r20,FRAME_20+48(%r1);					\
	ld	%r19,FRAME_19+48(%r1);					\
	ld	%r18,FRAME_18+48(%r1);					\
	ld	%r17,FRAME_17+48(%r1);					\
	ld	%r16,FRAME_16+48(%r1);					\
	ld	%r15,FRAME_15+48(%r1);					\
	ld	%r14,FRAME_14+48(%r1);					\
	ld	%r13,FRAME_13+48(%r1);					\
	ld	%r12,FRAME_12+48(%r1);					\
	ld	%r11,FRAME_11+48(%r1);					\
	ld	%r10,FRAME_10+48(%r1);					\
	ld	%r9, FRAME_9+48(%r1);					\
	ld	%r8, FRAME_8+48(%r1);					\
	ld	%r7, FRAME_7+48(%r1);					\
	ld	%r6, FRAME_6+48(%r1);					\
	ld	%r5, FRAME_5+48(%r1);					\
	ld	%r4, FRAME_4+48(%r1);					\
	ld	%r3, FRAME_3+48(%r1);					\
	ld	%r2, FRAME_2+48(%r1);					\
	ld	%r0, FRAME_0+48(%r1);					\
	ld	%r1, FRAME_1+48(%r1);					\
/* Can't touch %r1 from here on */					\
	mtsprg3	%r3;			/* save r3 */			\
/* Disable translation, machine check and recoverability: */		\
	mfmsr	%r3;							\
	andi.	%r3,%r3,~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l;		\
	mtmsr	%r3;							\
	isync;								\
/* Decide whether we return to user mode: */				\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR1)(%r3);			\
	mtcr	%r3;							\
	bf	17,1f;			/* branch if PSL_PR is false */	\
/* Restore user SRs */							\
	GET_CPUINFO(%r3);						\
	std	%r27,(savearea+CPUSAVE_R27)(%r3);			\
	std	%r28,(savearea+CPUSAVE_R28)(%r3);			\
	std	%r29,(savearea+CPUSAVE_R29)(%r3);			\
	std	%r30,(savearea+CPUSAVE_R30)(%r3);			\
	std	%r31,(savearea+CPUSAVE_R31)(%r3);			\
	mflr	%r27;			/* preserve LR */		\
	bl	restore_usersrs;	/* uses r28-r31 */		\
	mtlr	%r27;							\
	ld	%r31,(savearea+CPUSAVE_R31)(%r3);			\
	ld	%r30,(savearea+CPUSAVE_R30)(%r3);			\
	ld	%r29,(savearea+CPUSAVE_R29)(%r3);			\
	ld	%r28,(savearea+CPUSAVE_R28)(%r3);			\
	ld	%r27,(savearea+CPUSAVE_R27)(%r3);			\
1:	mfsprg2	%r3;			/* restore cr */		\
	mtcr	%r3;							\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR0)(%r3); /* restore srr0 */	\
	mtsrr0	%r3;							\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR1)(%r3); /* restore srr1 */	\
	mtsrr1	%r3;							\
	mfsprg3	%r3			/* restore r3 */
288
#ifdef KDTRACE_HOOKS
	.data
	.globl	dtrace_invop_calltrap_addr
	.align	8
	.type	dtrace_invop_calltrap_addr, @object
        .size	dtrace_invop_calltrap_addr, 8
	/* 8-byte slot (two .words); presumably patched at runtime with
	 * a trap-dispatch address for dtrace invop handling -- verify
	 * against the dtrace MD code that references it. */
dtrace_invop_calltrap_addr:
	.word	0
	.word	0

	.text
#endif
301
302/*
303 * Processor reset exception handler. These are typically
304 * the first instructions the processor executes after a
305 * software reset. We do this in two bits so that we are
306 * not still hanging around in the trap handling region
307 * once the MMU is turned on.
308 */
309	.globl	CNAME(rstcode), CNAME(rstcodeend), CNAME(cpu_reset_handler)
310	.p2align 3
311CNAME(rstcode):
312	/* Explicitly set MSR[SF] */
313	mfmsr	%r9
314	li	%r8,1
315	insrdi	%r9,%r8,1,0
316	mtmsrd	%r9
317	isync
318
319	bl	1f
320	.llong	cpu_reset_handler /* Make sure to maintain 8-byte alignment */
3211:	mflr	%r9
322	ld	%r9,0(%r9)
323	mtlr	%r9
324
325	blr
326CNAME(rstcodeend):
327
/*
 * Second stage of reset handling, run with the LR loaded by rstcode.
 * Sets up a temporary stack, bootstraps the CPU, and either longjmps
 * back via PC_RESTORE or (SMP) enters machdep_ap_bootstrap.
 */
cpu_reset_handler:
	GET_TOCBASE(%r2)

	ld	%r1,TOC_REF(tmpstk)(%r2)	/* get new SP */
	addi	%r1,%r1,(TMPSTKSZ-48)

	bl	CNAME(cpudep_ap_early_bootstrap) /* Set PCPU */
	nop
	lis	%r3,1@l			/* NOTE(review): lis yields 1<<16,
					 * not 1 -- confirm the callee only
					 * tests for non-zero */
	bl	CNAME(pmap_cpu_bootstrap)	/* Turn on virtual memory */
	nop
	bl	CNAME(cpudep_ap_bootstrap)	/* Set up PCPU and stack */
	nop
	mr	%r1,%r3				/* Use new stack */
	bl	CNAME(cpudep_ap_setup)
	nop
	GET_CPUINFO(%r5)
	ld	%r3,(PC_RESTORE)(%r5)
	cmpldi	%cr0,%r3,0		/* non-NULL PC_RESTORE: longjmp back */
	beq	%cr0,2f
	nop
	li	%r4,1
	bl	CNAME(longjmp)
	nop
2:
#ifdef SMP
	bl	CNAME(machdep_ap_bootstrap)	/* And away! */
	nop
#endif

	/* Should not be reached */
9:
	b	9b
361
362/*
363 * This code gets copied to all the trap vectors
364 * (except ISI/DSI, ALI, and the interrupts). Has to fit in 8 instructions!
365 */
366
367	.globl	CNAME(trapcode),CNAME(trapcodeend)
368	.p2align 3
369CNAME(trapcode):
370	mtsprg1	%r1			/* save SP */
371	mflr	%r1			/* Save the old LR in r1 */
372	mtsprg2 %r1			/* And then in SPRG2 */
373	ld	%r1,TRAP_GENTRAP(0)
374	mtlr	%r1
375	li	%r1, 0xe0		/* How to get the vector from LR */
376	blrl				/* Branch to generictrap */
377CNAME(trapcodeend):
378
379/*
380 * For SLB misses: do special things for the kernel
381 *
382 * Note: SPRG1 is always safe to overwrite any time the MMU is on, which is
383 * the only time this can be called.
384 */
385	.globl	CNAME(slbtrap),CNAME(slbtrapend)
386	.p2align 3
387CNAME(slbtrap):
388	mtsprg1	%r1			/* save SP */
389	GET_CPUINFO(%r1)
390	std	%r2,(PC_SLBSAVE+16)(%r1)
391	mfcr	%r2			/* save CR */
392	std	%r2,(PC_SLBSAVE+104)(%r1)
393	mfsrr1	%r2			/* test kernel mode */
394	mtcr	%r2
395	bf	17,2f			/* branch if PSL_PR is false */
396	/* User mode */
397	ld	%r2,(PC_SLBSAVE+104)(%r1) /* Restore CR */
398	mtcr	%r2
399	ld	%r2,(PC_SLBSAVE+16)(%r1) /* Restore R2 */
400	mflr	%r1			/* Save the old LR in r1 */
401	mtsprg2 %r1			/* And then in SPRG2 */
402					/* 52 bytes so far */
403	bl	1f
404	.llong	generictrap
4051:	mflr	%r1
406	ld	%r1,0(%r1)
407	mtlr	%r1
408	li	%r1, 0x80		/* How to get the vector from LR */
409	blrl				/* Branch to generictrap */
410					/* 84 bytes */
4112:	mflr	%r2			/* Save the old LR in r2 */
412	nop
413	bl	3f			/* Begin dance to jump to kern_slbtrap*/
414	.llong	kern_slbtrap
4153:	mflr	%r1
416	ld	%r1,0(%r1)
417	mtlr	%r1
418	GET_CPUINFO(%r1)
419	blrl				/* 124 bytes -- 4 to spare */
420CNAME(slbtrapend):
421
/*
 * Kernel-mode SLB miss handler.  Entered from slbtrap with:
 *	r1 = per-CPU pointer, r2 = old LR, SPRG1 = old SP,
 *	R2/CR already stashed in PC_SLBSAVE.
 * User-segment misses are rerouted to generictrap; real kernel misses
 * are serviced by handle_kernel_slb_spill on the per-CPU SLB stack.
 */
kern_slbtrap:
	std	%r2,(PC_SLBSAVE+136)(%r1) /* old LR */
	std	%r3,(PC_SLBSAVE+24)(%r1) /* save R3 */

	/* Check if this needs to be handled as a regular trap (userseg miss) */
	mflr	%r2
	andi.	%r2,%r2,0xff80		/* vector number from LR */
	cmpwi	%r2,0x380		/* data SLB miss? */
	bne	1f
	mfdar	%r2			/* data miss: fault addr is DAR */
	b	2f
1:	mfsrr0	%r2			/* instr miss: fault addr is SRR0 */
2:	/* r2 now contains the fault address */
	lis	%r3,SEGMENT_MASK@highesta
	ori	%r3,%r3,SEGMENT_MASK@highera
	sldi	%r3,%r3,32
	oris	%r3,%r3,SEGMENT_MASK@ha
	ori	%r3,%r3,SEGMENT_MASK@l
	and	%r2,%r2,%r3	/* R2 = segment base address */
	lis	%r3,USER_ADDR@highesta
	ori	%r3,%r3,USER_ADDR@highera
	sldi	%r3,%r3,32
	oris	%r3,%r3,USER_ADDR@ha
	ori	%r3,%r3,USER_ADDR@l
	cmpd	%r2,%r3		/* Compare fault base to USER_ADDR */
	bne	3f

	/* User seg miss, handle as a regular trap */
	ld	%r2,(PC_SLBSAVE+104)(%r1) /* Restore CR */
	mtcr	%r2
	ld	%r2,(PC_SLBSAVE+16)(%r1) /* Restore R2,R3 */
	ld	%r3,(PC_SLBSAVE+24)(%r1)
	ld	%r1,(PC_SLBSAVE+136)(%r1) /* Save the old LR in r1 */
	mtsprg2 %r1			/* And then in SPRG2 */
	li	%r1, 0x80		/* How to get the vector from LR */
	b	generictrap		/* Retain old LR using b */

3:	/* Real kernel SLB miss */
	std	%r0,(PC_SLBSAVE+0)(%r1)	/* free all volatile regs */
	mfsprg1	%r2			/* Old R1 */
	std	%r2,(PC_SLBSAVE+8)(%r1)
	/* R2,R3 already saved */
	std	%r4,(PC_SLBSAVE+32)(%r1)
	std	%r5,(PC_SLBSAVE+40)(%r1)
	std	%r6,(PC_SLBSAVE+48)(%r1)
	std	%r7,(PC_SLBSAVE+56)(%r1)
	std	%r8,(PC_SLBSAVE+64)(%r1)
	std	%r9,(PC_SLBSAVE+72)(%r1)
	std	%r10,(PC_SLBSAVE+80)(%r1)
	std	%r11,(PC_SLBSAVE+88)(%r1)
	std	%r12,(PC_SLBSAVE+96)(%r1)
	/* CR already saved */
	mfxer	%r2			/* save XER */
	std	%r2,(PC_SLBSAVE+112)(%r1)
	mflr	%r2			/* save LR (SP already saved) */
	std	%r2,(PC_SLBSAVE+120)(%r1)
	mfctr	%r2			/* save CTR */
	std	%r2,(PC_SLBSAVE+128)(%r1)

	/* Call handler */
	addi	%r1,%r1,PC_SLBSTACK-48+1024	/* per-CPU SLB spill stack */
	li	%r2,~15
	and	%r1,%r1,%r2		/* 16-byte align the stack */
	GET_TOCBASE(%r2)
	mflr	%r3
	andi.	%r3,%r3,0xff80		/* arg1: trap vector */
	mfdar	%r4			/* arg2: DAR */
	mfsrr0	%r5			/* arg3: faulting PC */
	bl	handle_kernel_slb_spill
	nop

	/* Save r28-31, restore r4-r12 */
	GET_CPUINFO(%r1)
	ld	%r4,(PC_SLBSAVE+32)(%r1)
	ld	%r5,(PC_SLBSAVE+40)(%r1)
	ld	%r6,(PC_SLBSAVE+48)(%r1)
	ld	%r7,(PC_SLBSAVE+56)(%r1)
	ld	%r8,(PC_SLBSAVE+64)(%r1)
	ld	%r9,(PC_SLBSAVE+72)(%r1)
	ld	%r10,(PC_SLBSAVE+80)(%r1)
	ld	%r11,(PC_SLBSAVE+88)(%r1)
	ld	%r12,(PC_SLBSAVE+96)(%r1)
	/* r8-r12 slots are now free; reuse them for r28-r31 across
	 * the restore_kernsrs call (which clobbers r28-r31) */
	std	%r28,(PC_SLBSAVE+64)(%r1)
	std	%r29,(PC_SLBSAVE+72)(%r1)
	std	%r30,(PC_SLBSAVE+80)(%r1)
	std	%r31,(PC_SLBSAVE+88)(%r1)

	/* Restore kernel mapping */
	bl	restore_kernsrs

	/* Restore remaining registers */
	ld	%r28,(PC_SLBSAVE+64)(%r1)
	ld	%r29,(PC_SLBSAVE+72)(%r1)
	ld	%r30,(PC_SLBSAVE+80)(%r1)
	ld	%r31,(PC_SLBSAVE+88)(%r1)

	ld	%r2,(PC_SLBSAVE+104)(%r1)
	mtcr	%r2
	ld	%r2,(PC_SLBSAVE+112)(%r1)
	mtxer	%r2
	ld	%r2,(PC_SLBSAVE+120)(%r1)
	mtlr	%r2
	ld	%r2,(PC_SLBSAVE+128)(%r1)
	mtctr	%r2
	ld	%r2,(PC_SLBSAVE+136)(%r1)
	mtlr	%r2			/* original pre-trap LR wins */

	/* Restore r0-r3 */
	ld	%r0,(PC_SLBSAVE+0)(%r1)
	ld	%r2,(PC_SLBSAVE+16)(%r1)
	ld	%r3,(PC_SLBSAVE+24)(%r1)
	mfsprg1	%r1

	/* Back to whatever we were doing */
	rfid
537
538/*
539 * For ALI: has to save DSISR and DAR
540 */
541	.globl	CNAME(alitrap),CNAME(aliend)
542CNAME(alitrap):
543	mtsprg1	%r1			/* save SP */
544	GET_CPUINFO(%r1)
545	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
546	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
547	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
548	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
549	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
550	mfdar	%r30
551	mfdsisr	%r31
552	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
553	std	%r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)
554	mfsprg1	%r1			/* restore SP, in case of branch */
555	mflr	%r28			/* save LR */
556	mfcr	%r29			/* save CR */
557
558	/* Begin dance to branch to s_trap in a bit */
559	b	1f
560	.p2align 3
5611:	nop
562	bl	1f
563	.llong	s_trap
5641:	mflr	%r31
565	ld	%r31,0(%r31)
566	mtlr	%r31
567
568	/* Put our exception vector in SPRG3 */
569	li	%r31, EXC_ALI
570	mtsprg3	%r31
571
572	/* Test whether we already had PR set */
573	mfsrr1	%r31
574	mtcr	%r31
575	blrl
576CNAME(aliend):
577
578/*
579 * Similar to the above for DSI
580 * Has to handle standard pagetable spills
581 */
582	.globl	CNAME(dsitrap),CNAME(dsiend)
583	.p2align 3
584CNAME(dsitrap):
585	mtsprg1	%r1			/* save SP */
586	GET_CPUINFO(%r1)
587	std	%r27,(PC_DISISAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
588	std	%r28,(PC_DISISAVE+CPUSAVE_R28)(%r1)
589	std	%r29,(PC_DISISAVE+CPUSAVE_R29)(%r1)
590	std	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
591	std	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
592	mfcr	%r29			/* save CR */
593	mfxer	%r30			/* save XER */
594	mtsprg2	%r30			/* in SPRG2 */
595	mfsrr1	%r31			/* test kernel mode */
596	mtcr	%r31
597	mflr	%r28			/* save LR (SP already saved) */
598	bl	1f			/* Begin branching to disitrap */
599	.llong	disitrap
6001:	mflr	%r1
601	ld	%r1,0(%r1)
602	mtlr	%r1
603	blrl				/* Branch to generictrap */
604CNAME(dsiend):
605
606/*
607 * Preamble code for DSI/ISI traps
608 */
609disitrap:
610	/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
611	mflr	%r1
612	andi.	%r1,%r1,0xff00
613	mtsprg3	%r1
614
615	GET_CPUINFO(%r1)
616	ld	%r31,(PC_DISISAVE+CPUSAVE_R27)(%r1)
617	std	%r31,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
618	ld	%r30,(PC_DISISAVE+CPUSAVE_R28)(%r1)
619	std	%r30,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
620	ld	%r31,(PC_DISISAVE+CPUSAVE_R29)(%r1)
621	std	%r31,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
622	ld	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
623	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
624	ld	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
625	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
626	mfdar	%r30
627	mfdsisr	%r31
628	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
629	std	%r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)
630
631#ifdef KDB
632	/* Try to detect a kernel stack overflow */
633	mfsrr1	%r31
634	mtcr	%r31
635	bt	17,realtrap		/* branch is user mode */
636	mfsprg1	%r31			/* get old SP */
637	clrrdi	%r31,%r31,12		/* Round SP down to nearest page */
638	sub.	%r30,%r31,%r30		/* SP - DAR */
639	bge	1f
640	neg	%r30,%r30		/* modulo value */
6411:	cmpldi	%cr0,%r30,4096		/* is DAR within a page of SP? */
642	bge	%cr0,realtrap		/* no, too far away. */
643
644	/* Now convert this DSI into a DDB trap.  */
645	GET_CPUINFO(%r1)
646	ld	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1) /* get DAR */
647	std	%r30,(PC_DBSAVE  +CPUSAVE_AIM_DAR)(%r1) /* save DAR */
648	ld	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1) /* get DSISR */
649	std	%r30,(PC_DBSAVE  +CPUSAVE_AIM_DSISR)(%r1) /* save DSISR */
650	ld	%r31,(PC_DISISAVE+CPUSAVE_R27)(%r1) /* get  r27 */
651	std	%r31,(PC_DBSAVE  +CPUSAVE_R27)(%r1) /* save r27 */
652	ld	%r30,(PC_DISISAVE+CPUSAVE_R28)(%r1) /* get  r28 */
653	std	%r30,(PC_DBSAVE  +CPUSAVE_R28)(%r1) /* save r28 */
654	ld	%r31,(PC_DISISAVE+CPUSAVE_R29)(%r1) /* get  r29 */
655	std	%r31,(PC_DBSAVE  +CPUSAVE_R29)(%r1) /* save r29 */
656	ld	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1) /* get  r30 */
657	std	%r30,(PC_DBSAVE  +CPUSAVE_R30)(%r1) /* save r30 */
658	ld	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1) /* get  r31 */
659	std	%r31,(PC_DBSAVE  +CPUSAVE_R31)(%r1) /* save r31 */
660	b	dbtrap
661#endif
662
	/* XXX need stack probe here */
/*
 * Non-KDB continuation of DSI/ISI handling: if the fault came from
 * user mode, switch to the thread's PCB stack and reinstall the
 * kernel SLB entries before joining the common s_trap path.
 */
realtrap:
/* Test whether we already had PR set */
	mfsrr1	%r1
	mtcr	%r1
	mfsprg1	%r1			/* restore SP (might have been
					   overwritten) */
	bf	17,k_trap		/* branch if PSL_PR is false */
	GET_CPUINFO(%r1)
	ld	%r1,PC_CURPCB(%r1)
	mr	%r27,%r28		/* Save LR, r29 */
	mtsprg2	%r29
	bl	restore_kernsrs		/* enable kernel mapping */
	mfsprg2	%r29
	mr	%r28,%r27
	b	s_trap
679
680/*
681 * generictrap does some standard setup for trap handling to minimize
682 * the code that need be installed in the actual vectors. It expects
683 * the following conditions.
684 *
685 * R1 - Trap vector = LR & (0xff00 | R1)
686 * SPRG1 - Original R1 contents
687 * SPRG2 - Original LR
688 */
689
690	.globl	CNAME(generictrap)
691generictrap:
692	/* Save R1 for computing the exception vector */
693	mtsprg3 %r1
694
695	/* Save interesting registers */
696	GET_CPUINFO(%r1)
697	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
698	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
699	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
700	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
701	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
702	mfdar	%r30
703	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
704	mfsprg1	%r1			/* restore SP, in case of branch */
705	mfsprg2	%r28			/* save LR */
706	mfcr	%r29			/* save CR */
707
708	/* Compute the exception vector from the link register */
709	mfsprg3 %r31
710	ori	%r31,%r31,0xff00
711	mflr	%r30
712	addi	%r30,%r30,-4 /* The branch instruction, not the next */
713	and	%r30,%r30,%r31
714	mtsprg3	%r30
715
716	/* Test whether we already had PR set */
717	mfsrr1	%r31
718	mtcr	%r31
719
720s_trap:
721	bf	17,k_trap		/* branch if PSL_PR is false */
722	GET_CPUINFO(%r1)
723u_trap:
724	ld	%r1,PC_CURPCB(%r1)
725	mr	%r27,%r28		/* Save LR, r29 */
726	mtsprg2	%r29
727	bl	restore_kernsrs		/* enable kernel mapping */
728	mfsprg2	%r29
729	mr	%r28,%r27
730
731/*
732 * Now the common trap catching code.
733 */
734k_trap:
735	FRAME_SETUP(PC_TEMPSAVE)
736/* Call C interrupt dispatcher: */
737trapagain:
738	GET_TOCBASE(%r2)
739	addi	%r3,%r1,48
740	bl	CNAME(powerpc_interrupt)
741	nop
742
743	.globl	CNAME(trapexit)	/* backtrace code sentinel */
744CNAME(trapexit):
745/* Disable interrupts: */
746	mfmsr	%r3
747	andi.	%r3,%r3,~PSL_EE@l
748	mtmsr	%r3
749	isync
750/* Test AST pending: */
751	ld	%r5,FRAME_SRR1+48(%r1)
752	mtcr	%r5
753	bf	17,1f			/* branch if PSL_PR is false */
754
755	GET_CPUINFO(%r3)		/* get per-CPU pointer */
756	lwz	%r4, TD_FLAGS(%r13)	/* get thread flags value */
757	lis	%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@h
758	ori	%r5,%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@l
759	and.	%r4,%r4,%r5
760	beq	1f
761	mfmsr	%r3			/* re-enable interrupts */
762	ori	%r3,%r3,PSL_EE@l
763	mtmsr	%r3
764	isync
765	GET_TOCBASE(%r2)
766	addi	%r3,%r1,48
767	bl	CNAME(ast)
768	nop
769	.globl	CNAME(asttrapexit)	/* backtrace code sentinel #2 */
770CNAME(asttrapexit):
771	b	trapexit		/* test ast ret value ? */
7721:
773	FRAME_LEAVE(PC_TEMPSAVE)
774	rfid
775
#if defined(KDB)
/*
 * Deliberate entry to dbtrap
 */
ASENTRY_NOPROF(breakpoint)
	mtsprg1	%r1
	mfmsr	%r3
	mtsrr1	%r3			/* stash current MSR in SRR1 */
	andi.	%r3,%r3,~(PSL_EE|PSL_ME)@l
	mtmsr	%r3			/* disable interrupts */
	isync
	GET_CPUINFO(%r3)
	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r3)
	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r3)
	std	%r29,(PC_DBSAVE+CPUSAVE_R29)(%r3)
	std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r3)
	std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r3)
	mflr	%r28			/* save LR for FRAME_SETUP */
	li	%r29,EXC_BPT
	mtlr	%r29			/* fake EXC_BPT as the trap vector */
	mfcr	%r29
	mtsrr0	%r28			/* "trap" PC = caller's LR */

/*
 * Now the kdb trap catching code.
 */
dbtrap:
	/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
	mflr	%r1
	andi.	%r1,%r1,0xff00
	mtsprg3	%r1

	ld	%r1,TRAP_TOCBASE(0)		/* get new SP */
	ld	%r1,TOC_REF(tmpstk)(%r1)
	addi	%r1,%r1,(TMPSTKSZ-48)

	FRAME_SETUP(PC_DBSAVE)
/* Call C trap code: */
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48
	bl	CNAME(db_trap_glue)
	nop
	or.	%r3,%r3,%r3		/* non-zero: KDB handled it */
	bne	dbleave
/* This wasn't for KDB, so switch to real trap: */
	ld	%r3,FRAME_EXC+48(%r1)	/* save exception */
	GET_CPUINFO(%r4)
	std	%r3,(PC_DBSAVE+CPUSAVE_R31)(%r4)
	FRAME_LEAVE(PC_DBSAVE)
	mtsprg1	%r1			/* prepare for entrance to realtrap */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mflr	%r28
	mfcr	%r29
	ld	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)
	mtsprg3	%r31			/* SPRG3 was clobbered by FRAME_LEAVE */
	mfsprg1	%r1
	b	realtrap
dbleave:
	FRAME_LEAVE(PC_DBSAVE)
	rfid
841
842/*
843 * In case of KDB we want a separate trap catcher for it
844 */
845	.globl	CNAME(dblow),CNAME(dbend)
846	.p2align 3
847CNAME(dblow):
848	mtsprg1	%r1			/* save SP */
849	mtsprg2	%r29			/* save r29 */
850	mfcr	%r29			/* save CR in r29 */
851	mfsrr1	%r1
852	mtcr	%r1
853	bf	17,1f			/* branch if privileged */
854
855	/* Unprivileged case */
856	mtcr	%r29			/* put the condition register back */
857        mfsprg2	%r29			/* ... and r29 */
858        mflr	%r1			/* save LR */
859	mtsprg2 %r1			/* And then in SPRG2 */
860
861	ld	%r1, TRAP_GENTRAP(0)	/* Get branch address */
862	mtlr	%r1
863	li	%r1, 0	 		/* How to get the vector from LR */
864	blrl				/* Branch to generictrap */
865
8661:
867	GET_CPUINFO(%r1)
868	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r1)	/* free r27 */
869	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r1)	/* free r28 */
870        mfsprg2	%r28				/* r29 holds cr...  */
871        std	%r28,(PC_DBSAVE+CPUSAVE_R29)(%r1)	/* free r29 */
872        std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r1)	/* free r30 */
873        std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)	/* free r31 */
874        mflr	%r28					/* save LR */
875	nop						/* alignment */
876	bl	9f					/* Begin branch */
877	.llong	dbtrap
8789:	mflr	%r1
879	ld	%r1,0(%r1)
880	mtlr	%r1
881	blrl				/* Branch to generictrap */
882CNAME(dbend):
883#endif /* KDB */
884