/* $FreeBSD$ */
/* $NetBSD: trap_subr.S,v 1.20 2002/04/22 23:20:08 kleink Exp $	*/

/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NOTICE: This is not a standalone file.  To use it, #include it in
 * your port's locore.S, like so:
 *
 *	#include <powerpc/aim/trap_subr.S>
 */

/*
 * Save/restore segment registers
 */

/*
 * Restore SRs for a pmap
 *
 * Requires that r28-r31 be scratch, with r28 initialized to the SLB cache
 */

/*
 * User SRs are loaded through a pointer to the current pmap.
 */
restore_usersrs:
	GET_CPUINFO(%r28)
	ld	%r28,PC_USERSLB(%r28)
	li	%r29, 0			/* Set the counter to zero */

	slbia
	slbmfee	%r31,%r29
	clrrdi	%r31,%r31,28
	slbie	%r31
1:	ld	%r31, 0(%r28)		/* Load SLB entry pointer */
	cmpdi	%r31, 0			/* If NULL, stop */
	beqlr

	ld	%r30, 0(%r31)		/* Load SLBV */
	ld	%r31, 8(%r31)		/* Load SLBE */
	or	%r31, %r31, %r29	/*  Set SLBE slot */
	slbmte	%r30, %r31		/* Install SLB entry */

	addi	%r28, %r28, 8		/* Advance pointer */
	addi	%r29, %r29, 1
	b	1b			/* Repeat */
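
/*
 * Rough C equivalent of the loop above (an illustrative sketch only; the
 * field and helper names are assumptions, not the real pmap/PCPU
 * declarations): walk the per-CPU cache of user SLB entries until the
 * first NULL pointer, installing each (SLBV, SLBE) pair in successive
 * slots.  The slbmfee/slbie pair beforehand flushes SLB entry 0 by hand,
 * presumably because slbia leaves that entry alone on these CPUs.
 *
 *	struct slb **cache = PCPU_GET(userslb);	// pc_userslb cache
 *	slbia();
 *	slb_flush_entry0();
 *	for (i = 0; cache[i] != NULL; i++)
 *		slbmte(cache[i]->slbv, cache[i]->slbe | i);
 */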

/*
 * Kernel SRs are loaded directly from the PCPU fields
 */
restore_kernsrs:
	GET_CPUINFO(%r28)
	addi	%r28,%r28,PC_KERNSLB
	li	%r29, 0			/* Set the counter to zero */

	slbia
	slbmfee	%r31,%r29
	clrrdi	%r31,%r31,28
	slbie	%r31
1:	cmpdi	%r29, USER_SLB_SLOT	/* Skip the user slot */
	beq-	2f

	ld	%r31, 8(%r28)		/* Load SLBE */
	cmpdi	%r31, 0			/* If SLBE is not valid, stop */
	beqlr
	ld	%r30, 0(%r28)		/* Load SLBV  */
	slbmte	%r30, %r31		/* Install SLB entry */

2:	addi	%r28, %r28, 16		/* Advance pointer */
	addi	%r29, %r29, 1
	cmpdi	%r29, 64		/* Repeat if we are not at the end */
	blt	1b
	blr
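
/*
 * Rough C equivalent (sketch; structure and field names are assumptions):
 * the kernel SLB entries live as a fixed array directly in the PCPU area,
 * so the walk is by index rather than by pointer, skipping the slot
 * reserved for the user segment and stopping at the first invalid (zero)
 * SLBE.
 *
 *	struct slb *slb = &pcpup->pc_kernslb[0];	// at PC_KERNSLB
 *	slbia();
 *	slb_flush_entry0();
 *	for (i = 0; i < 64; i++) {
 *		if (i == USER_SLB_SLOT)
 *			continue;		// leave the user slot alone
 *		if (slb[i].slbe == 0)
 *			break;			// first invalid entry ends it
 *		slbmte(slb[i].slbv, slb[i].slbe);
 *	}
 */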

/*
 * FRAME_SETUP assumes:
 *	SPRG1		SP (1)
 * 	SPRG3		trap type
 *	savearea	r27-r31,DAR,DSISR   (DAR & DSISR only for DSI traps)
 *	r28		LR
 *	r29		CR
 *	r30		scratch
 *	r31		scratch
 *	r1		kernel stack
 *	SRR0/1		as at start of trap
 *
 * NOTE: SPRG1 is never used while the MMU is on, making it safe to reuse
 * in any real-mode fault handler, including those handling double faults.
 */
#define	FRAME_SETUP(savearea)						\
/* Have to enable translation to allow access of kernel stack: */	\
	GET_CPUINFO(%r31);						\
	mfsrr0	%r30;							\
	std	%r30,(savearea+CPUSAVE_SRR0)(%r31);	/* save SRR0 */	\
	mfsrr1	%r30;							\
	std	%r30,(savearea+CPUSAVE_SRR1)(%r31);	/* save SRR1 */	\
	mfsprg1	%r31;			/* get saved SP (clears SPRG1) */ \
	mfmsr	%r30;							\
	ori	%r30,%r30,(PSL_DR|PSL_IR|PSL_RI)@l; /* relocation on */	\
	mtmsr	%r30;			/* stack can now be accessed */	\
	isync;								\
	stdu	%r31,-(FRAMELEN+288)(%r1); /* save it in the callframe */ \
	std	%r0, FRAME_0+48(%r1);	/* save r0 in the trapframe */	\
	std	%r31,FRAME_1+48(%r1);	/* save SP   "      "       */	\
	std	%r2, FRAME_2+48(%r1);	/* save r2   "      "       */	\
	std	%r28,FRAME_LR+48(%r1);	/* save LR   "      "       */	\
	std	%r29,FRAME_CR+48(%r1);	/* save CR   "      "       */	\
	GET_CPUINFO(%r2);						\
	ld	%r27,(savearea+CPUSAVE_R27)(%r2); /* get saved r27 */	\
	ld	%r28,(savearea+CPUSAVE_R28)(%r2); /* get saved r28 */	\
	ld	%r29,(savearea+CPUSAVE_R29)(%r2); /* get saved r29 */	\
	ld	%r30,(savearea+CPUSAVE_R30)(%r2); /* get saved r30 */	\
	ld	%r31,(savearea+CPUSAVE_R31)(%r2); /* get saved r31 */	\
	std	%r3,  FRAME_3+48(%r1);	/* save r3-r31 */		\
	std	%r4,  FRAME_4+48(%r1);					\
	std	%r5,  FRAME_5+48(%r1);					\
	std	%r6,  FRAME_6+48(%r1);					\
	std	%r7,  FRAME_7+48(%r1);					\
	std	%r8,  FRAME_8+48(%r1);					\
	std	%r9,  FRAME_9+48(%r1);					\
	std	%r10, FRAME_10+48(%r1);					\
	std	%r11, FRAME_11+48(%r1);					\
	std	%r12, FRAME_12+48(%r1);					\
	std	%r13, FRAME_13+48(%r1);					\
	std	%r14, FRAME_14+48(%r1);					\
	std	%r15, FRAME_15+48(%r1);					\
	std	%r16, FRAME_16+48(%r1);					\
	std	%r17, FRAME_17+48(%r1);					\
	std	%r18, FRAME_18+48(%r1);					\
	std	%r19, FRAME_19+48(%r1);					\
	std	%r20, FRAME_20+48(%r1);					\
	std	%r21, FRAME_21+48(%r1);					\
	std	%r22, FRAME_22+48(%r1);					\
	std	%r23, FRAME_23+48(%r1);					\
	std	%r24, FRAME_24+48(%r1);					\
	std	%r25, FRAME_25+48(%r1);					\
	std	%r26, FRAME_26+48(%r1);					\
	std	%r27, FRAME_27+48(%r1);					\
	std	%r28, FRAME_28+48(%r1);					\
	std	%r29, FRAME_29+48(%r1);					\
	std	%r30, FRAME_30+48(%r1);					\
	std	%r31, FRAME_31+48(%r1);					\
	ld	%r28,(savearea+CPUSAVE_AIM_DAR)(%r2);  /* saved DAR */	\
	ld	%r29,(savearea+CPUSAVE_AIM_DSISR)(%r2);/* saved DSISR */\
	ld	%r30,(savearea+CPUSAVE_SRR0)(%r2); /* saved SRR0 */	\
	ld	%r31,(savearea+CPUSAVE_SRR1)(%r2); /* saved SRR1 */	\
	mfxer	%r3;							\
	mfctr	%r4;							\
	mfsprg3	%r5;							\
	std	%r3, FRAME_XER+48(1);	/* save xer/ctr/exc */		\
	std	%r4, FRAME_CTR+48(1);					\
	std	%r5, FRAME_EXC+48(1);					\
	std	%r28,FRAME_AIM_DAR+48(1);				\
	std	%r29,FRAME_AIM_DSISR+48(1); /* save dsisr/srr0/srr1 */	\
	std	%r30,FRAME_SRR0+48(1);					\
	std	%r31,FRAME_SRR1+48(1);					\
	ld	%r13,PC_CURTHREAD(%r2)	/* set kernel curthread */

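/*
 * A note on the magic constants above (my reading of the code, not
 * authoritative): the "+48" added to every FRAME_* offset appears to be
 * the minimal 64-bit ELF ABI stack frame header (back chain, CR, LR, two
 * reserved doublewords, TOC pointer = 6 * 8 bytes), so the trapframe
 * proper starts at 48(%r1) and is what later gets handed to the C
 * dispatcher via "addi %r3,%r1,48".  The extra 288 bytes in the stdu
 * look like the ABI red zone below the interrupted stack pointer being
 * skipped so that the trapframe cannot clobber it.
 */
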
#define	FRAME_LEAVE(savearea)						\
/* Disable exceptions: */						\
	mfmsr	%r2;							\
	andi.	%r2,%r2,~PSL_EE@l;					\
	mtmsr	%r2;							\
	isync;								\
/* Now restore regs: */							\
	ld	%r2,FRAME_SRR0+48(%r1);					\
	ld	%r3,FRAME_SRR1+48(%r1);					\
	ld	%r4,FRAME_CTR+48(%r1);					\
	ld	%r5,FRAME_XER+48(%r1);					\
	ld	%r6,FRAME_LR+48(%r1);					\
	GET_CPUINFO(%r7);						\
	std	%r2,(savearea+CPUSAVE_SRR0)(%r7); /* save SRR0 */	\
	std	%r3,(savearea+CPUSAVE_SRR1)(%r7); /* save SRR1 */	\
	ld	%r7,FRAME_CR+48(%r1);					\
	mtctr	%r4;							\
	mtxer	%r5;							\
	mtlr	%r6;							\
	mtsprg2	%r7;			/* save cr */			\
	ld	%r31,FRAME_31+48(%r1);   /* restore r0-31 */		\
	ld	%r30,FRAME_30+48(%r1);					\
	ld	%r29,FRAME_29+48(%r1);					\
	ld	%r28,FRAME_28+48(%r1);					\
	ld	%r27,FRAME_27+48(%r1);					\
	ld	%r26,FRAME_26+48(%r1);					\
	ld	%r25,FRAME_25+48(%r1);					\
	ld	%r24,FRAME_24+48(%r1);					\
	ld	%r23,FRAME_23+48(%r1);					\
	ld	%r22,FRAME_22+48(%r1);					\
	ld	%r21,FRAME_21+48(%r1);					\
	ld	%r20,FRAME_20+48(%r1);					\
	ld	%r19,FRAME_19+48(%r1);					\
	ld	%r18,FRAME_18+48(%r1);					\
	ld	%r17,FRAME_17+48(%r1);					\
	ld	%r16,FRAME_16+48(%r1);					\
	ld	%r15,FRAME_15+48(%r1);					\
	ld	%r14,FRAME_14+48(%r1);					\
	ld	%r13,FRAME_13+48(%r1);					\
	ld	%r12,FRAME_12+48(%r1);					\
	ld	%r11,FRAME_11+48(%r1);					\
	ld	%r10,FRAME_10+48(%r1);					\
	ld	%r9, FRAME_9+48(%r1);					\
	ld	%r8, FRAME_8+48(%r1);					\
	ld	%r7, FRAME_7+48(%r1);					\
	ld	%r6, FRAME_6+48(%r1);					\
	ld	%r5, FRAME_5+48(%r1);					\
	ld	%r4, FRAME_4+48(%r1);					\
	ld	%r3, FRAME_3+48(%r1);					\
	ld	%r2, FRAME_2+48(%r1);					\
	ld	%r0, FRAME_0+48(%r1);					\
	ld	%r1, FRAME_1+48(%r1);					\
/* Can't touch %r1 from here on */					\
	mtsprg3	%r3;			/* save r3 */			\
/* Disable translation, machine check and recoverability: */		\
	mfmsr	%r3;							\
	andi.	%r3,%r3,~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l;		\
	mtmsr	%r3;							\
	isync;								\
/* Decide whether we return to user mode: */				\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR1)(%r3);			\
	mtcr	%r3;							\
	bf	17,1f;			/* branch if PSL_PR is false */	\
/* Restore user SRs */							\
	GET_CPUINFO(%r3);						\
	std	%r27,(savearea+CPUSAVE_R27)(%r3);			\
	std	%r28,(savearea+CPUSAVE_R28)(%r3);			\
	std	%r29,(savearea+CPUSAVE_R29)(%r3);			\
	std	%r30,(savearea+CPUSAVE_R30)(%r3);			\
	std	%r31,(savearea+CPUSAVE_R31)(%r3);			\
	mflr	%r27;			/* preserve LR */		\
	bl	restore_usersrs;	/* uses r28-r31 */		\
	mtlr	%r27;							\
	ld	%r31,(savearea+CPUSAVE_R31)(%r3);			\
	ld	%r30,(savearea+CPUSAVE_R30)(%r3);			\
	ld	%r29,(savearea+CPUSAVE_R29)(%r3);			\
	ld	%r28,(savearea+CPUSAVE_R28)(%r3);			\
	ld	%r27,(savearea+CPUSAVE_R27)(%r3);			\
1:	mfsprg2	%r3;			/* restore cr */		\
	mtcr	%r3;							\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR0)(%r3); /* restore srr0 */	\
	mtsrr0	%r3;							\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR1)(%r3); /* restore srr1 */	\
	mtsrr1	%r3;							\
	mfsprg3	%r3			/* restore r3 */

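/*
 * FRAME_LEAVE mirrors FRAME_SETUP: SRR0/SRR1 are parked back in the
 * per-CPU savearea and CR in SPRG2 because the final steps run with
 * translation (and RI) turned off and essentially no free GPRs.  The
 * "bf 17,1f" test on the saved SRR1 checks PSL_PR: only when returning
 * to user mode are the user segment mappings reinstalled via
 * restore_usersrs before the caller's rfid.
 */
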
#ifdef KDTRACE_HOOKS
	.data
	.globl	dtrace_invop_calltrap_addr
	.align	8
	.type	dtrace_invop_calltrap_addr, @object
	.size	dtrace_invop_calltrap_addr, 8
dtrace_invop_calltrap_addr:
	.word	0
	.word	0

	.text
#endif

/*
 * Processor reset exception handler. These are typically
 * the first instructions the processor executes after a
 * software reset. We do this in two bits so that we are
 * not still hanging around in the trap handling region
 * once the MMU is turned on.
 */
	.globl	CNAME(rstcode), CNAME(rstcodeend)
CNAME(rstcode):
	/* Explicitly set MSR[SF] */
	mfmsr	%r9
	li	%r8,1
	insrdi	%r9,%r8,1,0
	mtmsrd	%r9
	isync
	bl	1f
	.llong	cpu_reset
1:	mflr	%r9
	ld	%r9,0(%r9)
	mtlr	%r9

	blr
CNAME(rstcodeend):

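/*
 * A note on the "bl 1f; .llong <target>" pattern above (and elsewhere in
 * this file): rstcode is copied to the fixed reset vector, away from its
 * link-time address, so a plain relative branch would no longer point at
 * cpu_reset.  The bl instead deposits the address of the inline .llong
 * literal in LR; loading through that pointer yields the absolute address
 * of the target, which is then branched to via LR.
 */
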
cpu_reset:
	GET_TOCBASE(%r2)

	ld	%r1,TOC_REF(tmpstk)(%r2)	/* get new SP */
	addi	%r1,%r1,(TMPSTKSZ-48)

	bl	CNAME(cpudep_ap_early_bootstrap) /* Set PCPU */
	nop
	lis	%r3,1@l
	bl	CNAME(pmap_cpu_bootstrap)	/* Turn on virtual memory */
	nop
	bl	CNAME(cpudep_ap_bootstrap)	/* Set up PCPU and stack */
	nop
	mr	%r1,%r3				/* Use new stack */
	bl	CNAME(cpudep_ap_setup)
	nop
	GET_CPUINFO(%r5)
	ld	%r3,(PC_RESTORE)(%r5)
	cmpldi	%cr0,%r3,0
	beq	%cr0,2f
	nop
	li	%r4,1
	b	CNAME(longjmp)
	nop
2:
#ifdef SMP
	bl	CNAME(machdep_ap_bootstrap)	/* And away! */
	nop
#endif

	/* Should not be reached */
9:
	b	9b

/*
 * This code gets copied to all the trap vectors
 * (except ISI/DSI, ALI, and the interrupts). Has to fit in 8 instructions!
 */

	.globl	CNAME(trapcode),CNAME(trapcodeend)
	.p2align 3
CNAME(trapcode):
	mtsprg1	%r1			/* save SP */
	mflr	%r1			/* Save the old LR in r1 */
	mtsprg2 %r1			/* And then in SPRG2 */
	ld	%r1,TRAP_GENTRAP(0)
	mtlr	%r1
	li	%r1, 0xe0		/* How to get the vector from LR */
	blrl				/* Branch to generictrap */
CNAME(trapcodeend):

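/*
 * The stub above is copied to each vector, so it cannot use relative
 * branches out of itself.  Instead it loads the absolute address of
 * generictrap from what appears to be a doubleword at the fixed real-mode
 * offset TRAP_GENTRAP (filled in elsewhere when the vectors are
 * installed) and branches through LR with blrl, which conveniently leaves
 * the vector address recoverable from LR.  The 0xe0 loaded into r1 is the
 * extra mask generictrap uses for that recovery; see the worked example
 * before generictrap below.
 */
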
/*
 * For SLB misses: do special things for the kernel
 *
 * Note: SPRG1 is always safe to overwrite any time the MMU is on, which is
 * the only time this can be called.
 */
	.globl	CNAME(slbtrap),CNAME(slbtrapend)
	.p2align 3
CNAME(slbtrap):
	mtsprg1	%r1			/* save SP */
	GET_CPUINFO(%r1)
	std	%r2,(PC_SLBSAVE+16)(%r1)
	mfcr	%r2			/* save CR */
	std	%r2,(PC_SLBSAVE+104)(%r1)
	mfsrr1	%r2			/* test kernel mode */
	mtcr	%r2
	bf	17,2f			/* branch if PSL_PR is false */
	/* User mode */
	ld	%r2,(PC_SLBSAVE+104)(%r1) /* Restore CR */
	mtcr	%r2
	ld	%r2,(PC_SLBSAVE+16)(%r1) /* Restore R2 */
	mflr	%r1			/* Save the old LR in r1 */
	mtsprg2 %r1			/* And then in SPRG2 */
					/* 52 bytes so far */
	bl	1f
	.llong	generictrap
1:	mflr	%r1
	ld	%r1,0(%r1)
	mtlr	%r1
	li	%r1, 0x80		/* How to get the vector from LR */
	blrl				/* Branch to generictrap */
					/* 84 bytes */
2:	mflr	%r2			/* Save the old LR in r2 */
	nop
	bl	3f			/* Begin dance to jump to kern_slbtrap */
	.llong	kern_slbtrap
3:	mflr	%r1
	ld	%r1,0(%r1)
	mtlr	%r1
	GET_CPUINFO(%r1)
	blrl				/* 124 bytes -- 4 to spare */
CNAME(slbtrapend):

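/*
 * kern_slbtrap: reached only for SLB misses taken in kernel mode.  It
 * first decides whether the faulting address (DAR for a data-side miss at
 * vector 0x380, SRR0 for an instruction-side miss) actually falls in the
 * user segment; if so the miss is handed back to generictrap and treated
 * as an ordinary trap.  Only a genuine kernel-address miss saves the
 * remaining volatile registers into PC_SLBSAVE, switches to the small
 * per-CPU stack at PC_SLBSTACK and calls handle_kernel_slb_spill().
 */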
kern_slbtrap:
	std	%r2,(PC_SLBSAVE+136)(%r1) /* old LR */
	std	%r3,(PC_SLBSAVE+24)(%r1) /* save R3 */

	/* Check if this needs to be handled as a regular trap (userseg miss) */
	mflr	%r2
	andi.	%r2,%r2,0xff80
	cmpwi	%r2,0x380
	bne	1f
	mfdar	%r2
	b	2f
1:	mfsrr0	%r2
2:	/* r2 now contains the fault address */
	lis	%r3,SEGMENT_MASK@highesta
	ori	%r3,%r3,SEGMENT_MASK@highera
	sldi	%r3,%r3,32
	oris	%r3,%r3,SEGMENT_MASK@ha
	ori	%r3,%r3,SEGMENT_MASK@l
	and	%r2,%r2,%r3	/* R2 = segment base address */
	lis	%r3,USER_ADDR@highesta
	ori	%r3,%r3,USER_ADDR@highera
	sldi	%r3,%r3,32
	oris	%r3,%r3,USER_ADDR@ha
	ori	%r3,%r3,USER_ADDR@l
	cmpd	%r2,%r3		/* Compare fault base to USER_ADDR */
	bne	3f

	/* User seg miss, handle as a regular trap */
	ld	%r2,(PC_SLBSAVE+104)(%r1) /* Restore CR */
	mtcr	%r2
	ld	%r2,(PC_SLBSAVE+16)(%r1) /* Restore R2,R3 */
	ld	%r3,(PC_SLBSAVE+24)(%r1)
	ld	%r1,(PC_SLBSAVE+136)(%r1) /* Save the old LR in r1 */
	mtsprg2 %r1			/* And then in SPRG2 */
	li	%r1, 0x80		/* How to get the vector from LR */
	b	generictrap		/* Retain old LR using b */

3:	/* Real kernel SLB miss */
	std	%r0,(PC_SLBSAVE+0)(%r1)	/* free all volatile regs */
	mfsprg1	%r2			/* Old R1 */
	std	%r2,(PC_SLBSAVE+8)(%r1)
	/* R2,R3 already saved */
	std	%r4,(PC_SLBSAVE+32)(%r1)
	std	%r5,(PC_SLBSAVE+40)(%r1)
	std	%r6,(PC_SLBSAVE+48)(%r1)
	std	%r7,(PC_SLBSAVE+56)(%r1)
	std	%r8,(PC_SLBSAVE+64)(%r1)
	std	%r9,(PC_SLBSAVE+72)(%r1)
	std	%r10,(PC_SLBSAVE+80)(%r1)
	std	%r11,(PC_SLBSAVE+88)(%r1)
	std	%r12,(PC_SLBSAVE+96)(%r1)
	/* CR already saved */
	mfxer	%r2			/* save XER */
	std	%r2,(PC_SLBSAVE+112)(%r1)
	mflr	%r2			/* save LR (SP already saved) */
	std	%r2,(PC_SLBSAVE+120)(%r1)
	mfctr	%r2			/* save CTR */
	std	%r2,(PC_SLBSAVE+128)(%r1)

	/* Call handler */
	addi	%r1,%r1,PC_SLBSTACK-48+1024
	li	%r2,~15
	and	%r1,%r1,%r2
	GET_TOCBASE(%r2)
	mflr	%r3
	andi.	%r3,%r3,0xff80
	mfdar	%r4
	mfsrr0	%r5
	bl	handle_kernel_slb_spill
	nop

	/* Save r28-31, restore r4-r12 */
	GET_CPUINFO(%r1)
	ld	%r4,(PC_SLBSAVE+32)(%r1)
	ld	%r5,(PC_SLBSAVE+40)(%r1)
	ld	%r6,(PC_SLBSAVE+48)(%r1)
	ld	%r7,(PC_SLBSAVE+56)(%r1)
	ld	%r8,(PC_SLBSAVE+64)(%r1)
	ld	%r9,(PC_SLBSAVE+72)(%r1)
	ld	%r10,(PC_SLBSAVE+80)(%r1)
	ld	%r11,(PC_SLBSAVE+88)(%r1)
	ld	%r12,(PC_SLBSAVE+96)(%r1)
	std	%r28,(PC_SLBSAVE+64)(%r1)
	std	%r29,(PC_SLBSAVE+72)(%r1)
	std	%r30,(PC_SLBSAVE+80)(%r1)
	std	%r31,(PC_SLBSAVE+88)(%r1)

	/* Restore kernel mapping */
	bl	restore_kernsrs

	/* Restore remaining registers */
	ld	%r28,(PC_SLBSAVE+64)(%r1)
	ld	%r29,(PC_SLBSAVE+72)(%r1)
	ld	%r30,(PC_SLBSAVE+80)(%r1)
	ld	%r31,(PC_SLBSAVE+88)(%r1)

	ld	%r2,(PC_SLBSAVE+104)(%r1)
	mtcr	%r2
	ld	%r2,(PC_SLBSAVE+112)(%r1)
	mtxer	%r2
	ld	%r2,(PC_SLBSAVE+120)(%r1)
	mtlr	%r2
	ld	%r2,(PC_SLBSAVE+128)(%r1)
	mtctr	%r2
	ld	%r2,(PC_SLBSAVE+136)(%r1)
	mtlr	%r2

	/* Restore r0-r3 */
	ld	%r0,(PC_SLBSAVE+0)(%r1)
	ld	%r2,(PC_SLBSAVE+16)(%r1)
	ld	%r3,(PC_SLBSAVE+24)(%r1)
	mfsprg1	%r1

	/* Back to whatever we were doing */
	rfid

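/*
 * For reference, the PC_SLBSAVE offsets used by slbtrap/kern_slbtrap
 * above decode as follows (derived from the code; 8 bytes per slot):
 *
 *	  0..96		r0-r12, with rN at offset 8*N (r1 taken from SPRG1)
 *	104		CR
 *	112		XER
 *	120		LR pointing back into the slbtrap stub (vector ID)
 *	128		CTR
 *	136		LR of the interrupted context ("old LR")
 *
 * Offsets 64-88 are briefly reused to stash r28-r31 around the
 * restore_kernsrs call.
 */
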
/*
 * For ALI: has to save DSISR and DAR
 */
	.globl	CNAME(alitrap),CNAME(aliend)
CNAME(alitrap):
	mtsprg1	%r1			/* save SP */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	mfdsisr	%r31
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)
	mfsprg1	%r1			/* restore SP, in case of branch */
	mflr	%r28			/* save LR */
	mfcr	%r29			/* save CR */

	/* Begin dance to branch to s_trap in a bit */
	b	1f
	.p2align 3
1:	nop
	bl	1f
	.llong	s_trap
1:	mflr	%r31
	ld	%r31,0(%r31)
	mtlr	%r31

	/* Put our exception vector in SPRG3 */
	li	%r31, EXC_ALI
	mtsprg3	%r31

	/* Test whether we already had PR set */
	mfsrr1	%r31
	mtcr	%r31
	blrl
CNAME(aliend):
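
/*
 * alitrap saves DAR and DSISR into PC_TEMPSAVE in the vector stub itself
 * (generictrap only records DAR), and since it knows its own vector it
 * loads EXC_ALI into SPRG3 directly and enters at s_trap, skipping
 * generictrap's vector computation.
 */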

/*
 * Similar to the above for DSI
 * Has to handle standard pagetable spills
 */
	.globl	CNAME(dsitrap),CNAME(dsiend)
CNAME(dsitrap):
	mtsprg1	%r1			/* save SP */
	GET_CPUINFO(%r1)
	std	%r27,(PC_DISISAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
	std	%r28,(PC_DISISAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_DISISAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
	mfcr	%r29			/* save CR */
	mfxer	%r30			/* save XER */
	mtsprg2	%r30			/* in SPRG2 */
	mfsrr1	%r31			/* test kernel mode */
	mtcr	%r31
	mflr	%r28			/* save LR (SP already saved) */
	bl	1f			/* Begin branching to disitrap */
	.llong	disitrap
1:	mflr	%r1
	ld	%r1,0(%r1)
	mtlr	%r1
	blrl				/* Branch to disitrap */
CNAME(dsiend):

/*
 * Preamble code for DSI/ISI traps
 */
disitrap:
	/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
	mflr	%r1
	andi.	%r1,%r1,0xff00
	mtsprg3	%r1

	GET_CPUINFO(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R27)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
	ld	%r30,(PC_DISISAVE+CPUSAVE_R28)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R29)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	ld	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	mfdsisr	%r31
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)

#ifdef KDB
	/* Try to detect a kernel stack overflow */
	mfsrr1	%r31
	mtcr	%r31
	bt	17,realtrap		/* branch if user mode */
	mfsprg1	%r31			/* get old SP */
	clrrdi	%r31,%r31,12		/* Round SP down to nearest page */
	sub.	%r30,%r31,%r30		/* SP - DAR */
	bge	1f
	neg	%r30,%r30		/* modulo value */
1:	cmpldi	%cr0,%r30,4096		/* is DAR within a page of SP? */
	bge	%cr0,realtrap		/* no, too far away. */

	/* Now convert this DSI into a DDB trap.  */
	GET_CPUINFO(%r1)
	ld	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1) /* get DAR */
	std	%r30,(PC_DBSAVE  +CPUSAVE_AIM_DAR)(%r1) /* save DAR */
	ld	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1) /* get DSISR */
	std	%r30,(PC_DBSAVE  +CPUSAVE_AIM_DSISR)(%r1) /* save DSISR */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R27)(%r1) /* get  r27 */
	std	%r31,(PC_DBSAVE  +CPUSAVE_R27)(%r1) /* save r27 */
	ld	%r30,(PC_DISISAVE+CPUSAVE_R28)(%r1) /* get  r28 */
	std	%r30,(PC_DBSAVE  +CPUSAVE_R28)(%r1) /* save r28 */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R29)(%r1) /* get  r29 */
	std	%r31,(PC_DBSAVE  +CPUSAVE_R29)(%r1) /* save r29 */
	ld	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1) /* get  r30 */
	std	%r30,(PC_DBSAVE  +CPUSAVE_R30)(%r1) /* save r30 */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1) /* get  r31 */
	std	%r31,(PC_DBSAVE  +CPUSAVE_R31)(%r1) /* save r31 */
	b	dbtrap
#endif
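
/*
 * The KDB check above is a heuristic: round the interrupted SP down to a
 * page boundary and take |SP_page - DAR|; if the faulting address lies
 * within 4096 bytes of the stack pointer's page, assume the kernel stack
 * has been overrun and hand the fault to the debugger (dbtrap) instead of
 * the normal DSI path.  For example, with SP = 0x...d010 and a push that
 * faults at DAR = 0x...cff8, the distance is 8 bytes, well under a page,
 * so the trap is reported as a probable stack overflow.
 */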

	/* XXX need stack probe here */
realtrap:
/* Test whether we already had PR set */
	mfsrr1	%r1
	mtcr	%r1
	mfsprg1	%r1			/* restore SP (might have been
					   overwritten) */
	bf	17,k_trap		/* branch if PSL_PR is false */
	GET_CPUINFO(%r1)
	ld	%r1,PC_CURPCB(%r1)
	mr	%r27,%r28		/* Save LR, r29 */
	mtsprg2	%r29
	bl	restore_kernsrs		/* enable kernel mapping */
	mfsprg2	%r29
	mr	%r28,%r27
	b	s_trap

/*
 * generictrap does some standard setup for trap handling to minimize
 * the code that need be installed in the actual vectors. It expects
 * the following conditions.
 *
 * R1 - Trap vector = LR & (0xff00 | R1)
 * SPRG1 - Original R1 contents
 * SPRG2 - Original LR
 */
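
/*
 * A worked example of the vector recovery (illustrative; the exact offsets
 * depend on the stub, but the arithmetic is the point): the 7-instruction
 * trapcode stub copied to, say, the program check vector at 0x700 ends
 * with a blrl at offset 0x18, so on entry here LR = 0x71c.  The stub
 * passed r1 = 0xe0, giving a mask of 0xff00 | 0xe0 = 0xffe0;
 * (LR - 4) & 0xffe0 = 0x718 & 0xffe0 = 0x700, which is stored in SPRG3
 * and later lands in the trapframe's exc field via FRAME_SETUP.
 */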

	.globl	CNAME(generictrap)
generictrap:
	/* Save R1 for computing the exception vector */
	mtsprg3 %r1

	/* Save interesting registers */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	mfsprg1	%r1			/* restore SP, in case of branch */
	mfsprg2	%r28			/* save LR */
	mfcr	%r29			/* save CR */

	/* Compute the exception vector from the link register */
	mfsprg3 %r31
	ori	%r31,%r31,0xff00
	mflr	%r30
	addi	%r30,%r30,-4 /* The branch instruction, not the next */
	and	%r30,%r30,%r31
	mtsprg3	%r30

	/* Test whether we already had PR set */
	mfsrr1	%r31
	mtcr	%r31

s_trap:
	bf	17,k_trap		/* branch if PSL_PR is false */
	GET_CPUINFO(%r1)
u_trap:
	ld	%r1,PC_CURPCB(%r1)
	mr	%r27,%r28		/* Save LR, r29 */
	mtsprg2	%r29
	bl	restore_kernsrs		/* enable kernel mapping */
	mfsprg2	%r29
	mr	%r28,%r27

/*
 * Now the common trap catching code.
 */
k_trap:
	FRAME_SETUP(PC_TEMPSAVE)
/* Call C interrupt dispatcher: */
trapagain:
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48
	bl	CNAME(powerpc_interrupt)
	nop

	.globl	CNAME(trapexit)	/* backtrace code sentinel */
CNAME(trapexit):
/* Disable interrupts: */
	mfmsr	%r3
	andi.	%r3,%r3,~PSL_EE@l
	mtmsr	%r3
	isync
/* Test AST pending: */
	ld	%r5,FRAME_SRR1+48(%r1)
	mtcr	%r5
	bf	17,1f			/* branch if PSL_PR is false */

	GET_CPUINFO(%r3)		/* get per-CPU pointer */
	lwz	%r4, TD_FLAGS(%r13)	/* get thread flags value */
	lis	%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@h
	ori	%r5,%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@l
	and.	%r4,%r4,%r5
	beq	1f
	mfmsr	%r3			/* re-enable interrupts */
	ori	%r3,%r3,PSL_EE@l
	mtmsr	%r3
	isync
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48
	bl	CNAME(ast)
	nop
	.globl	CNAME(asttrapexit)	/* backtrace code sentinel #2 */
CNAME(asttrapexit):
	b	trapexit		/* test ast ret value ? */
1:
	FRAME_LEAVE(PC_TEMPSAVE)
	rfid

#if defined(KDB)
/*
 * Deliberate entry to dbtrap
 */
ASENTRY_NOPROF(breakpoint)
	mtsprg1	%r1
	mfmsr	%r3
	mtsrr1	%r3
	andi.	%r3,%r3,~(PSL_EE|PSL_ME)@l
	mtmsr	%r3			/* disable interrupts */
	isync
	GET_CPUINFO(%r3)
	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r3)
	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r3)
	std	%r29,(PC_DBSAVE+CPUSAVE_R29)(%r3)
	std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r3)
	std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r3)
	mflr	%r28
	li	%r29,EXC_BPT
	mtlr	%r29
	mfcr	%r29
	mtsrr0	%r28

/*
 * Now the kdb trap catching code.
 */
dbtrap:
	/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
	mflr	%r1
	andi.	%r1,%r1,0xff00
	mtsprg3	%r1

	ld	%r1,TRAP_TOCBASE(0)		/* get new SP */
	ld	%r1,TOC_REF(tmpstk)(%r1)
	addi	%r1,%r1,(TMPSTKSZ-48)

	FRAME_SETUP(PC_DBSAVE)
/* Call C trap code: */
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48
	bl	CNAME(db_trap_glue)
	nop
	or.	%r3,%r3,%r3
	bne	dbleave
/* This wasn't for KDB, so switch to real trap: */
	ld	%r3,FRAME_EXC+48(%r1)	/* save exception */
	GET_CPUINFO(%r4)
	std	%r3,(PC_DBSAVE+CPUSAVE_R31)(%r4)
	FRAME_LEAVE(PC_DBSAVE)
	mtsprg1	%r1			/* prepare for entrance to realtrap */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mflr	%r28
	mfcr	%r29
	ld	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)
	mtsprg3	%r31			/* SPRG3 was clobbered by FRAME_LEAVE */
	mfsprg1	%r1
	b	realtrap
dbleave:
	FRAME_LEAVE(PC_DBSAVE)
	rfid
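
/*
 * If db_trap_glue() declines the trap, the code above unwinds the KDB
 * frame with FRAME_LEAVE(PC_DBSAVE), then re-creates the minimal entry
 * state realtrap expects (SP in SPRG1, r27-r31 in PC_TEMPSAVE, LR in r28,
 * CR in r29, the original exception number back in SPRG3) and re-enters
 * the normal trap path as if KDB had never been involved.
 */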

/*
 * In case of KDB we want a separate trap catcher for it
 */
	.globl	CNAME(dblow),CNAME(dbend)
CNAME(dblow):
	mtsprg1	%r1			/* save SP */
	mtsprg2	%r29			/* save r29 */
	mfcr	%r29			/* save CR in r29 */
	mfsrr1	%r1
	mtcr	%r1
	bf	17,1f			/* branch if privileged */

	/* Unprivileged case */
	mtcr	%r29			/* put the condition register back */
        mfsprg2	%r29			/* ... and r29 */
        mflr	%r1			/* save LR */
	mtsprg2 %r1			/* And then in SPRG2 */

	ld	%r1, TRAP_GENTRAP(0)	/* Get branch address */
	mtlr	%r1
	li	%r1, 0	 		/* How to get the vector from LR */
	blrl				/* Branch to generictrap */

1:
	GET_CPUINFO(%r1)
	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r1)	/* free r27 */
	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r1)	/* free r28 */
        mfsprg2	%r28				/* r29 holds cr...  */
        std	%r28,(PC_DBSAVE+CPUSAVE_R29)(%r1)	/* free r29 */
        std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r1)	/* free r30 */
        std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)	/* free r31 */
        mflr	%r28					/* save LR */
	bl	9f					/* Begin branch */
	.llong	dbtrap
9:	mflr	%r1
	ld	%r1,0(%r1)
	mtlr	%r1
	blrl				/* Branch to dbtrap */
CNAME(dbend):
#endif /* KDB */