xref: /freebsd/sys/powerpc/aim/trap_subr64.S (revision 60eddb209b5ad13a549ca74a41b7cb38a31da5ef)
/* $FreeBSD$ */
/* $NetBSD: trap_subr.S,v 1.20 2002/04/22 23:20:08 kleink Exp $	*/

/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NOTICE: This is not a standalone file.  To use it, #include it in
 * your port's locore.S, like so:
 *
 *	#include <powerpc/aim/trap_subr.S>
 */

/* Locate the per-CPU data structure: SPRG0 holds the PCPU pointer */
#define GET_CPUINFO(r)  \
        mfsprg0  r
/*
 * Load the kernel TOC pointer.  TRAP_TOCBASE is a fixed low address
 * that is mapped/accessible even in real mode, so trap handlers can
 * recover the TOC before translation is enabled.
 */
#define GET_TOCBASE(r)  \
	li	r,TRAP_TOCBASE;	/* Magic address for TOC */ \
	ld	r,0(r)
/*
 * Restore SRs for a pmap
 *
 * Requires that r28-r31 be scratch, with r28 initialized to the SLB cache
 */

/*
 * User SRs are loaded through a pointer to the current pmap.
 *
 * Walks the per-CPU user SLB cache (PC_USERSLB), an array of pointers to
 * (SLBV, SLBE) pairs, installing each entry into successive SLB slots
 * until a NULL pointer terminates the list.  Clobbers r28-r31.
 */
restore_usersrs:
	GET_CPUINFO(%r28)
	ld	%r28,PC_USERSLB(%r28)	/* r28 = user SLB cache array */
	li	%r29, 0			/* Set the counter to zero */

	/*
	 * Invalidate the whole SLB; entry 0 may survive slbia on some
	 * implementations, so read its ESID and slbie it explicitly.
	 */
	slbia
	slbmfee	%r31,%r29
	clrrdi	%r31,%r31,28		/* isolate the ESID field */
	slbie	%r31
1:	ld	%r31, 0(%r28)		/* Load SLB entry pointer */
	cmpdi	%r31, 0			/* If NULL, stop */
	beqlr

	ld	%r30, 0(%r31)		/* Load SLBV */
	ld	%r31, 8(%r31)		/* Load SLBE */
	or	%r31, %r31, %r29	/*  Set SLBE slot */
	slbmte	%r30, %r31		/* Install SLB entry */

	addi	%r28, %r28, 8		/* Advance pointer */
	addi	%r29, %r29, 1		/* Next SLB slot */
	b	1b			/* Repeat */
79
/*
 * Kernel SRs are loaded directly from the PCPU fields
 *
 * Walks the 64-slot kernel SLB table at PC_KERNSLB (16 bytes per slot:
 * SLBV at offset 0, SLBE at offset 8), installing each entry until an
 * invalid (zero) SLBE is found.  The slot reserved for the user segment
 * (USER_SLB_SLOT) is skipped rather than terminating the walk.
 * Clobbers r28-r31.
 */
restore_kernsrs:
	GET_CPUINFO(%r28)
	addi	%r28,%r28,PC_KERNSLB	/* r28 = kernel SLB table */
	li	%r29, 0			/* Set the counter to zero */

	/*
	 * Invalidate the whole SLB; entry 0 may survive slbia on some
	 * implementations, so read its ESID and slbie it explicitly.
	 */
	slbia
	slbmfee	%r31,%r29
	clrrdi	%r31,%r31,28		/* isolate the ESID field */
	slbie	%r31
1:	cmpdi	%r29, USER_SLB_SLOT	/* Skip the user slot */
	beq-	2f

	ld	%r31, 8(%r28)		/* Load SLBE */
	cmpdi	%r31, 0			/* If SLBE is not valid, stop */
	beqlr
	ld	%r30, 0(%r28)		/* Load SLBV  */
	slbmte	%r30, %r31		/* Install SLB entry */

2:	addi	%r28, %r28, 16		/* Advance pointer */
	addi	%r29, %r29, 1
	cmpdi	%r29, 64		/* Repeat if we are not at the end */
	blt	1b
	blr
106
/*
 * FRAME_SETUP assumes:
 *	SPRG1		SP (1)
 * 	SPRG3		trap type
 *	savearea	r27-r31,DAR,DSISR   (DAR & DSISR only for DSI traps)
 *	r28		LR
 *	r29		CR
 *	r30		scratch
 *	r31		scratch
 *	r1		kernel stack
 *	SRR0/1		as at start of trap
 *
 * On exit the full trapframe has been built on the kernel stack at
 * offset 48 (the ABI frame header) above r1, r2 holds the PCPU pointer,
 * and r13 holds curthread.  The "+288" in the stdu skips the area below
 * the old SP that the ABI allows leaf code to use (assumed protected
 * zone -- TODO confirm against the ELFv1 ABI constant used elsewhere).
 *
 * NOTE: SPRG1 is never used while the MMU is on, making it safe to reuse
 * in any real-mode fault handler, including those handling double faults.
 */
#define	FRAME_SETUP(savearea)						\
/* Have to enable translation to allow access of kernel stack: */	\
	GET_CPUINFO(%r31);						\
	mfsrr0	%r30;							\
	std	%r30,(savearea+CPUSAVE_SRR0)(%r31);	/* save SRR0 */	\
	mfsrr1	%r30;							\
	std	%r30,(savearea+CPUSAVE_SRR1)(%r31);	/* save SRR1 */	\
	mfsprg1	%r31;			/* get saved SP (clears SPRG1) */ \
	mfmsr	%r30;							\
	ori	%r30,%r30,(PSL_DR|PSL_IR|PSL_RI)@l; /* relocation on */	\
	mtmsr	%r30;			/* stack can now be accessed */	\
	isync;								\
	stdu	%r31,-(FRAMELEN+288)(%r1); /* save it in the callframe */ \
	std	%r0, FRAME_0+48(%r1);	/* save r0 in the trapframe */	\
	std	%r31,FRAME_1+48(%r1);	/* save SP   "      "       */	\
	std	%r2, FRAME_2+48(%r1);	/* save r2   "      "       */	\
	std	%r28,FRAME_LR+48(%r1);	/* save LR   "      "       */	\
	std	%r29,FRAME_CR+48(%r1);	/* save CR   "      "       */	\
	GET_CPUINFO(%r2);						\
	ld	%r27,(savearea+CPUSAVE_R27)(%r2); /* get saved r27 */	\
	ld	%r28,(savearea+CPUSAVE_R28)(%r2); /* get saved r28 */	\
	ld	%r29,(savearea+CPUSAVE_R29)(%r2); /* get saved r29 */	\
	ld	%r30,(savearea+CPUSAVE_R30)(%r2); /* get saved r30 */	\
	ld	%r31,(savearea+CPUSAVE_R31)(%r2); /* get saved r31 */	\
	std	%r3,  FRAME_3+48(%r1);	/* save r3-r31 */		\
	std	%r4,  FRAME_4+48(%r1);					\
	std	%r5,  FRAME_5+48(%r1);					\
	std	%r6,  FRAME_6+48(%r1);					\
	std	%r7,  FRAME_7+48(%r1);					\
	std	%r8,  FRAME_8+48(%r1);					\
	std	%r9,  FRAME_9+48(%r1);					\
	std	%r10, FRAME_10+48(%r1);					\
	std	%r11, FRAME_11+48(%r1);					\
	std	%r12, FRAME_12+48(%r1);					\
	std	%r13, FRAME_13+48(%r1);					\
	std	%r14, FRAME_14+48(%r1);					\
	std	%r15, FRAME_15+48(%r1);					\
	std	%r16, FRAME_16+48(%r1);					\
	std	%r17, FRAME_17+48(%r1);					\
	std	%r18, FRAME_18+48(%r1);					\
	std	%r19, FRAME_19+48(%r1);					\
	std	%r20, FRAME_20+48(%r1);					\
	std	%r21, FRAME_21+48(%r1);					\
	std	%r22, FRAME_22+48(%r1);					\
	std	%r23, FRAME_23+48(%r1);					\
	std	%r24, FRAME_24+48(%r1);					\
	std	%r25, FRAME_25+48(%r1);					\
	std	%r26, FRAME_26+48(%r1);					\
	std	%r27, FRAME_27+48(%r1);					\
	std	%r28, FRAME_28+48(%r1);					\
	std	%r29, FRAME_29+48(%r1);					\
	std	%r30, FRAME_30+48(%r1);					\
	std	%r31, FRAME_31+48(%r1);					\
	ld	%r28,(savearea+CPUSAVE_AIM_DAR)(%r2);  /* saved DAR */	\
	ld	%r29,(savearea+CPUSAVE_AIM_DSISR)(%r2);/* saved DSISR */\
	ld	%r30,(savearea+CPUSAVE_SRR0)(%r2); /* saved SRR0 */	\
	ld	%r31,(savearea+CPUSAVE_SRR1)(%r2); /* saved SRR1 */	\
	mfxer	%r3;							\
	mfctr	%r4;							\
	mfsprg3	%r5;			/* trap vector number */	\
	std	%r3, FRAME_XER+48(1);	/* save xer/ctr/exc */		\
	std	%r4, FRAME_CTR+48(1);					\
	std	%r5, FRAME_EXC+48(1);					\
	std	%r28,FRAME_AIM_DAR+48(1);				\
	std	%r29,FRAME_AIM_DSISR+48(1); /* save dsisr/srr0/srr1 */	\
	std	%r30,FRAME_SRR0+48(1);					\
	std	%r31,FRAME_SRR1+48(1);					\
	ld	%r13,PC_CURTHREAD(%r2)	/* set kernel curthread */
190
/*
 * FRAME_LEAVE unwinds a trapframe built by FRAME_SETUP: it reloads all
 * registers from the frame, stashes SRR0/SRR1 in the savearea, turns
 * translation off, and (if returning to user mode, PSL_PR set in the
 * saved SRR1) reinstalls the user SLB entries.  The caller issues the
 * final rfid.  SPRG2 is used to carry CR and SPRG3 to carry r3 across
 * the stretch where no GPR is free.
 *
 * Note: the andi. forms zero the upper 48 bits of the mask, but mtmsr
 * (the 32-bit form, unlike mtmsrd) writes only the low 32 MSR bits, so
 * high bits such as PSL_SF are unaffected.
 */
#define	FRAME_LEAVE(savearea)						\
/* Disable exceptions: */						\
	mfmsr	%r2;							\
	andi.	%r2,%r2,~PSL_EE@l;					\
	mtmsr	%r2;							\
	isync;								\
/* Now restore regs: */							\
	ld	%r2,FRAME_SRR0+48(%r1);					\
	ld	%r3,FRAME_SRR1+48(%r1);					\
	ld	%r4,FRAME_CTR+48(%r1);					\
	ld	%r5,FRAME_XER+48(%r1);					\
	ld	%r6,FRAME_LR+48(%r1);					\
	GET_CPUINFO(%r7);						\
	std	%r2,(savearea+CPUSAVE_SRR0)(%r7); /* save SRR0 */	\
	std	%r3,(savearea+CPUSAVE_SRR1)(%r7); /* save SRR1 */	\
	ld	%r7,FRAME_CR+48(%r1);					\
	mtctr	%r4;							\
	mtxer	%r5;							\
	mtlr	%r6;							\
	mtsprg2	%r7;			/* save cr */			\
	ld	%r31,FRAME_31+48(%r1);   /* restore r0-31 */		\
	ld	%r30,FRAME_30+48(%r1);					\
	ld	%r29,FRAME_29+48(%r1);					\
	ld	%r28,FRAME_28+48(%r1);					\
	ld	%r27,FRAME_27+48(%r1);					\
	ld	%r26,FRAME_26+48(%r1);					\
	ld	%r25,FRAME_25+48(%r1);					\
	ld	%r24,FRAME_24+48(%r1);					\
	ld	%r23,FRAME_23+48(%r1);					\
	ld	%r22,FRAME_22+48(%r1);					\
	ld	%r21,FRAME_21+48(%r1);					\
	ld	%r20,FRAME_20+48(%r1);					\
	ld	%r19,FRAME_19+48(%r1);					\
	ld	%r18,FRAME_18+48(%r1);					\
	ld	%r17,FRAME_17+48(%r1);					\
	ld	%r16,FRAME_16+48(%r1);					\
	ld	%r15,FRAME_15+48(%r1);					\
	ld	%r14,FRAME_14+48(%r1);					\
	ld	%r13,FRAME_13+48(%r1);					\
	ld	%r12,FRAME_12+48(%r1);					\
	ld	%r11,FRAME_11+48(%r1);					\
	ld	%r10,FRAME_10+48(%r1);					\
	ld	%r9, FRAME_9+48(%r1);					\
	ld	%r8, FRAME_8+48(%r1);					\
	ld	%r7, FRAME_7+48(%r1);					\
	ld	%r6, FRAME_6+48(%r1);					\
	ld	%r5, FRAME_5+48(%r1);					\
	ld	%r4, FRAME_4+48(%r1);					\
	ld	%r3, FRAME_3+48(%r1);					\
	ld	%r2, FRAME_2+48(%r1);					\
	ld	%r0, FRAME_0+48(%r1);					\
	ld	%r1, FRAME_1+48(%r1);					\
/* Can't touch %r1 from here on */					\
	mtsprg3	%r3;			/* save r3 */			\
/* Disable translation, machine check and recoverability: */		\
	mfmsr	%r3;							\
	andi.	%r3,%r3,~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l;		\
	mtmsr	%r3;							\
	isync;								\
/* Decide whether we return to user mode: */				\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR1)(%r3);			\
	mtcr	%r3;							\
	bf	17,1f;			/* branch if PSL_PR is false */	\
/* Restore user SRs */							\
	GET_CPUINFO(%r3);						\
	std	%r27,(savearea+CPUSAVE_R27)(%r3);			\
	std	%r28,(savearea+CPUSAVE_R28)(%r3);			\
	std	%r29,(savearea+CPUSAVE_R29)(%r3);			\
	std	%r30,(savearea+CPUSAVE_R30)(%r3);			\
	std	%r31,(savearea+CPUSAVE_R31)(%r3);			\
	mflr	%r27;			/* preserve LR */		\
	bl	restore_usersrs;	/* uses r28-r31 */		\
	mtlr	%r27;							\
	ld	%r31,(savearea+CPUSAVE_R31)(%r3);			\
	ld	%r30,(savearea+CPUSAVE_R30)(%r3);			\
	ld	%r29,(savearea+CPUSAVE_R29)(%r3);			\
	ld	%r28,(savearea+CPUSAVE_R28)(%r3);			\
	ld	%r27,(savearea+CPUSAVE_R27)(%r3);			\
1:	mfsprg2	%r3;			/* restore cr */		\
	mtcr	%r3;							\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR0)(%r3); /* restore srr0 */	\
	mtsrr0	%r3;							\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR1)(%r3); /* restore srr1 */	\
	mtsrr1	%r3;							\
	mfsprg3	%r3			/* restore r3 */
279
#ifdef KDTRACE_HOOKS
	.data
	.globl	dtrace_invop_calltrap_addr
	.align	8
	.type	dtrace_invop_calltrap_addr, @object
        .size	dtrace_invop_calltrap_addr, 8
/* 8-byte pointer slot (two 4-byte .words), filled in at runtime by dtrace */
dtrace_invop_calltrap_addr:
	.word	0
	.word	0

	.text
#endif
292
/*
 * Processor reset exception handler. These are typically
 * the first instructions the processor executes after a
 * software reset. We do this in two bits so that we are
 * not still hanging around in the trap handling region
 * once the MMU is turned on.
 */
	.globl	CNAME(rstcode), CNAME(rstcodeend), CNAME(cpu_reset_handler)
	.p2align 3
CNAME(rstcode):
	/* Explicitly set MSR[SF] (insert a 1 at MSR bit 0, 64-bit mode) */
	mfmsr	%r9
	li	%r8,1
	insrdi	%r9,%r8,1,0
	mtmsrd	%r9
	isync

	/*
	 * The bl leaves the address of the inline .llong in LR; load the
	 * absolute address of cpu_reset_handler from it and jump there.
	 */
	bl	1f
	.llong	cpu_reset_handler /* Make sure to maintain 8-byte alignment */
1:	mflr	%r9
	ld	%r9,0(%r9)
	mtlr	%r9

	blr
CNAME(rstcodeend):
318
/*
 * Out-of-vector half of the reset handler: sets up a temporary stack,
 * turns on the MMU, then bootstraps this CPU.  If PC_RESTORE is set,
 * the CPU instead resumes the previously-saved context via longjmp.
 */
cpu_reset_handler:
	GET_TOCBASE(%r2)

	ld	%r1,TOC_REF(tmpstk)(%r2)	/* get new SP */
	addi	%r1,%r1,(TMPSTKSZ-48)		/* leave room for frame header */

	bl	CNAME(cpudep_ap_early_bootstrap) /* Set PCPU */
	nop
	lis	%r3,1@l
	bl	CNAME(pmap_cpu_bootstrap)	/* Turn on virtual memory */
	nop
	bl	CNAME(cpudep_ap_bootstrap)	/* Set up PCPU and stack */
	nop
	mr	%r1,%r3				/* Use new stack */
	bl	CNAME(cpudep_ap_setup)
	nop
	GET_CPUINFO(%r5)
	ld	%r3,(PC_RESTORE)(%r5)		/* saved context to resume? */
	cmpldi	%cr0,%r3,0
	beq	%cr0,2f
	nop
	li	%r4,1				/* longjmp return value */
	b	CNAME(longjmp)
	nop
2:
#ifdef SMP
	bl	CNAME(machdep_ap_bootstrap)	/* And away! */
	nop
#endif

	/* Should not be reached */
9:
	b	9b
352
/*
 * This code gets copied to all the trap vectors
 * (except ISI/DSI, ALI, and the interrupts). Has to fit in 8 instructions!
 *
 * Leaves SPRG1 = old SP, SPRG2 = old LR, r1 = vector mask (combined
 * with LR by generictrap to recover the vector number).
 */

	.globl	CNAME(trapcode),CNAME(trapcodeend)
	.p2align 3
CNAME(trapcode):
	mtsprg1	%r1			/* save SP */
	mflr	%r1			/* Save the old LR in r1 */
	mtsprg2 %r1			/* And then in SPRG2 */
	ld	%r1,TRAP_GENTRAP(0)	/* Get generictrap's address */
	mtlr	%r1
	li	%r1, 0xe0		/* How to get the vector from LR */
	blrl				/* Branch to generictrap */
CNAME(trapcodeend):
369
/*
 * For SLB misses: do special things for the kernel
 *
 * A user-mode miss is rerouted to generictrap like any other fault;
 * a kernel-mode miss goes to kern_slbtrap, which can service the spill
 * without building a full trapframe.  This stub is copied into the
 * 0x380/0x480 vectors and must fit in 128 bytes.
 *
 * Note: SPRG1 is always safe to overwrite any time the MMU is on, which is
 * the only time this can be called.
 */
	.globl	CNAME(slbtrap),CNAME(slbtrapend)
	.p2align 3
CNAME(slbtrap):
	mtsprg1	%r1			/* save SP */
	GET_CPUINFO(%r1)
	std	%r2,(PC_SLBSAVE+16)(%r1) /* save R2 */
	mfcr	%r2			/* save CR */
	std	%r2,(PC_SLBSAVE+104)(%r1)
	mfsrr1	%r2			/* test kernel mode */
	mtcr	%r2
	bf	17,2f			/* branch if PSL_PR is false */
	/* User mode */
	ld	%r2,(PC_SLBSAVE+104)(%r1) /* Restore CR */
	mtcr	%r2
	ld	%r2,(PC_SLBSAVE+16)(%r1) /* Restore R2 */
	mflr	%r1			/* Save the old LR in r1 */
	mtsprg2 %r1			/* And then in SPRG2 */
					/* 52 bytes so far */
	bl	1f
	.llong	generictrap
1:	mflr	%r1
	ld	%r1,0(%r1)
	mtlr	%r1
	li	%r1, 0x80		/* How to get the vector from LR */
	blrl				/* Branch to generictrap */
					/* 84 bytes */
2:	mflr	%r2			/* Save the old LR in r2 */
	nop
	bl	3f			/* Begin dance to jump to kern_slbtrap*/
	.llong	kern_slbtrap
3:	mflr	%r1
	ld	%r1,0(%r1)
	mtlr	%r1
	GET_CPUINFO(%r1)
	blrl				/* 124 bytes -- 4 to spare */
CNAME(slbtrapend):
412
/*
 * Kernel-mode SLB miss handler.  Entered from slbtrap with:
 *	r1  = PCPU pointer
 *	r2  = old LR (return address into the faulting code)
 *	SPRG1 = old SP, PC_SLBSAVE+16/+104 = old r2/CR
 *
 * Misses on the user segment (USER_ADDR) are re-dispatched to
 * generictrap as ordinary faults; real kernel misses are serviced by
 * handle_kernel_slb_spill() on a small per-CPU stack, after which all
 * state is restored and the faulting instruction is resumed via rfid.
 */
kern_slbtrap:
	std	%r2,(PC_SLBSAVE+136)(%r1) /* old LR */
	std	%r3,(PC_SLBSAVE+24)(%r1) /* save R3 */

	/* Check if this needs to be handled as a regular trap (userseg miss) */
	mflr	%r2
	andi.	%r2,%r2,0xff80		/* extract the vector number */
	cmpwi	%r2,0x380		/* data (0x380) vs instruction miss */
	bne	1f
	mfdar	%r2
	b	2f
1:	mfsrr0	%r2
2:	/* r2 now contains the fault address */
	lis	%r3,SEGMENT_MASK@highesta
	ori	%r3,%r3,SEGMENT_MASK@highera
	sldi	%r3,%r3,32
	oris	%r3,%r3,SEGMENT_MASK@ha
	ori	%r3,%r3,SEGMENT_MASK@l
	and	%r2,%r2,%r3	/* R2 = segment base address */
	lis	%r3,USER_ADDR@highesta
	ori	%r3,%r3,USER_ADDR@highera
	sldi	%r3,%r3,32
	oris	%r3,%r3,USER_ADDR@ha
	ori	%r3,%r3,USER_ADDR@l
	cmpd	%r2,%r3		/* Compare fault base to USER_ADDR */
	bne	3f

	/* User seg miss, handle as a regular trap */
	ld	%r2,(PC_SLBSAVE+104)(%r1) /* Restore CR */
	mtcr	%r2
	ld	%r2,(PC_SLBSAVE+16)(%r1) /* Restore R2,R3 */
	ld	%r3,(PC_SLBSAVE+24)(%r1)
	ld	%r1,(PC_SLBSAVE+136)(%r1) /* Save the old LR in r1 */
	mtsprg2 %r1			/* And then in SPRG2 */
	li	%r1, 0x80		/* How to get the vector from LR */
	b	generictrap		/* Retain old LR using b */

3:	/* Real kernel SLB miss */
	std	%r0,(PC_SLBSAVE+0)(%r1)	/* free all volatile regs */
	mfsprg1	%r2			/* Old R1 */
	std	%r2,(PC_SLBSAVE+8)(%r1)
	/* R2,R3 already saved */
	std	%r4,(PC_SLBSAVE+32)(%r1)
	std	%r5,(PC_SLBSAVE+40)(%r1)
	std	%r6,(PC_SLBSAVE+48)(%r1)
	std	%r7,(PC_SLBSAVE+56)(%r1)
	std	%r8,(PC_SLBSAVE+64)(%r1)
	std	%r9,(PC_SLBSAVE+72)(%r1)
	std	%r10,(PC_SLBSAVE+80)(%r1)
	std	%r11,(PC_SLBSAVE+88)(%r1)
	std	%r12,(PC_SLBSAVE+96)(%r1)
	/* CR already saved */
	mfxer	%r2			/* save XER */
	std	%r2,(PC_SLBSAVE+112)(%r1)
	mflr	%r2			/* save LR (SP already saved) */
	std	%r2,(PC_SLBSAVE+120)(%r1)
	mfctr	%r2			/* save CTR */
	std	%r2,(PC_SLBSAVE+128)(%r1)

	/* Call handler on the per-CPU SLB spill stack, 16-byte aligned */
	addi	%r1,%r1,PC_SLBSTACK-48+1024
	li	%r2,~15			/* ~15 == -16: alignment mask */
	and	%r1,%r1,%r2
	GET_TOCBASE(%r2)
	mflr	%r3
	andi.	%r3,%r3,0xff80		/* arg0: trap vector */
	mfdar	%r4			/* arg1: DAR */
	mfsrr0	%r5			/* arg2: faulting PC */
	bl	handle_kernel_slb_spill
	nop

	/* Save r28-31, restore r4-r12 (r28-r31 reuse the r8-r11 slots) */
	GET_CPUINFO(%r1)
	ld	%r4,(PC_SLBSAVE+32)(%r1)
	ld	%r5,(PC_SLBSAVE+40)(%r1)
	ld	%r6,(PC_SLBSAVE+48)(%r1)
	ld	%r7,(PC_SLBSAVE+56)(%r1)
	ld	%r8,(PC_SLBSAVE+64)(%r1)
	ld	%r9,(PC_SLBSAVE+72)(%r1)
	ld	%r10,(PC_SLBSAVE+80)(%r1)
	ld	%r11,(PC_SLBSAVE+88)(%r1)
	ld	%r12,(PC_SLBSAVE+96)(%r1)
	std	%r28,(PC_SLBSAVE+64)(%r1)
	std	%r29,(PC_SLBSAVE+72)(%r1)
	std	%r30,(PC_SLBSAVE+80)(%r1)
	std	%r31,(PC_SLBSAVE+88)(%r1)

	/* Restore kernel mapping (clobbers r28-r31, saved just above) */
	bl	restore_kernsrs

	/* Restore remaining registers */
	ld	%r28,(PC_SLBSAVE+64)(%r1)
	ld	%r29,(PC_SLBSAVE+72)(%r1)
	ld	%r30,(PC_SLBSAVE+80)(%r1)
	ld	%r31,(PC_SLBSAVE+88)(%r1)

	ld	%r2,(PC_SLBSAVE+104)(%r1)
	mtcr	%r2			/* restore CR */
	ld	%r2,(PC_SLBSAVE+112)(%r1)
	mtxer	%r2			/* restore XER */
	ld	%r2,(PC_SLBSAVE+120)(%r1)
	mtlr	%r2			/* NOTE(review): dead store -- LR is */
	ld	%r2,(PC_SLBSAVE+128)(%r1) /* overwritten by the mtlr below; */
	mtctr	%r2			/* confirm +120 load is vestigial */
	ld	%r2,(PC_SLBSAVE+136)(%r1)
	mtlr	%r2			/* restore original LR */

	/* Restore r0-r3 */
	ld	%r0,(PC_SLBSAVE+0)(%r1)
	ld	%r2,(PC_SLBSAVE+16)(%r1)
	ld	%r3,(PC_SLBSAVE+24)(%r1)
	mfsprg1	%r1			/* restore original SP */

	/* Back to whatever we were doing */
	rfid
528
/*
 * For ALI: has to save DSISR and DAR
 *
 * Alignment-interrupt vector stub: saves r27-r31, DAR and DSISR into
 * PC_TEMPSAVE, puts EXC_ALI in SPRG3 and the saved MSR into CR, then
 * branches to s_trap (skipping generictrap's vector computation).
 */
	.globl	CNAME(alitrap),CNAME(aliend)
CNAME(alitrap):
	mtsprg1	%r1			/* save SP */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	mfdsisr	%r31
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)
	mfsprg1	%r1			/* restore SP, in case of branch */
	mflr	%r28			/* save LR */
	mfcr	%r29			/* save CR */

	/* Begin dance to branch to s_trap in a bit */
	b	1f
	.p2align 3
1:	nop
	bl	1f
	.llong	s_trap
1:	mflr	%r31
	ld	%r31,0(%r31)
	mtlr	%r31

	/* Put our exception vector in SPRG3 */
	li	%r31, EXC_ALI
	mtsprg3	%r31

	/* Test whether we already had PR set */
	mfsrr1	%r31
	mtcr	%r31
	blrl				/* Branch to s_trap */
CNAME(aliend):
568
/*
 * Similar to the above for DSI
 * Has to handle standard pagetable spills
 *
 * Saves r27-r31 in PC_DISISAVE (not PC_TEMPSAVE) so disitrap can
 * inspect them, stashes XER in SPRG2, and branches to disitrap.
 */
	.globl	CNAME(dsitrap),CNAME(dsiend)
	.p2align 3
CNAME(dsitrap):
	mtsprg1	%r1			/* save SP */
	GET_CPUINFO(%r1)
	std	%r27,(PC_DISISAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
	std	%r28,(PC_DISISAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_DISISAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
	mfcr	%r29			/* save CR */
	mfxer	%r30			/* save XER */
	mtsprg2	%r30			/* in SPRG2 */
	mfsrr1	%r31			/* test kernel mode */
	mtcr	%r31
	mflr	%r28			/* save LR (SP already saved) */
	bl	1f			/* Begin branching to disitrap */
	.llong	disitrap
1:	mflr	%r1
	ld	%r1,0(%r1)
	mtlr	%r1
	blrl				/* Branch to disitrap */
CNAME(dsiend):
596
/*
 * Preamble code for DSI/ISI traps
 *
 * Copies the register save from PC_DISISAVE into PC_TEMPSAVE (the area
 * FRAME_SETUP expects), adds DAR/DSISR, and falls through to realtrap.
 * With KDB, a DSI whose DAR lands within a page of the faulting SP is
 * first diverted to the debugger as a likely kernel stack overflow.
 */
disitrap:
	/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
	mflr	%r1
	andi.	%r1,%r1,0xff00
	mtsprg3	%r1

	/* Copy the DISISAVE register save into TEMPSAVE */
	GET_CPUINFO(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R27)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
	ld	%r30,(PC_DISISAVE+CPUSAVE_R28)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R29)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	ld	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	mfdsisr	%r31
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)

#ifdef KDB
	/* Try to detect a kernel stack overflow */
	mfsrr1	%r31
	mtcr	%r31
	bt	17,realtrap		/* branch if user mode */
	mfsprg1	%r31			/* get old SP */
	clrrdi	%r31,%r31,12		/* Round SP down to nearest page */
	sub.	%r30,%r31,%r30		/* SP - DAR */
	bge	1f
	neg	%r30,%r30		/* modulo value */
1:	cmpldi	%cr0,%r30,4096		/* is DAR within a page of SP? */
	bge	%cr0,realtrap		/* no, too far away. */

	/* Now convert this DSI into a DDB trap.  */
	GET_CPUINFO(%r1)
	ld	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1) /* get DAR */
	std	%r30,(PC_DBSAVE  +CPUSAVE_AIM_DAR)(%r1) /* save DAR */
	ld	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1) /* get DSISR */
	std	%r30,(PC_DBSAVE  +CPUSAVE_AIM_DSISR)(%r1) /* save DSISR */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R27)(%r1) /* get  r27 */
	std	%r31,(PC_DBSAVE  +CPUSAVE_R27)(%r1) /* save r27 */
	ld	%r30,(PC_DISISAVE+CPUSAVE_R28)(%r1) /* get  r28 */
	std	%r30,(PC_DBSAVE  +CPUSAVE_R28)(%r1) /* save r28 */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R29)(%r1) /* get  r29 */
	std	%r31,(PC_DBSAVE  +CPUSAVE_R29)(%r1) /* save r29 */
	ld	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1) /* get  r30 */
	std	%r30,(PC_DBSAVE  +CPUSAVE_R30)(%r1) /* save r30 */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1) /* get  r31 */
	std	%r31,(PC_DBSAVE  +CPUSAVE_R31)(%r1) /* save r31 */
	b	dbtrap
#endif
653
	/* XXX need stack probe here */
/*
 * Entry point for traps whose vector is already in SPRG3 (from disitrap
 * or the KDB path).  If the trap came from user mode, switch to the
 * thread's kernel stack (PCB) and reload the kernel SLB entries before
 * joining the common path at s_trap/k_trap.
 */
realtrap:
/* Test whether we already had PR set */
	mfsrr1	%r1
	mtcr	%r1
	mfsprg1	%r1			/* restore SP (might have been
					   overwritten) */
	bf	17,k_trap		/* branch if PSL_PR is false */
	GET_CPUINFO(%r1)
	ld	%r1,PC_CURPCB(%r1)	/* switch to the kernel stack */
	mr	%r27,%r28		/* Save LR, r29 */
	mtsprg2	%r29
	bl	restore_kernsrs		/* enable kernel mapping */
	mfsprg2	%r29
	mr	%r28,%r27
	b	s_trap
670
/*
 * generictrap does some standard setup for trap handling to minimize
 * the code that need be installed in the actual vectors. It expects
 * the following conditions.
 *
 * R1 - Trap vector = LR & (0xff00 | R1)
 * SPRG1 - Original R1 contents
 * SPRG2 - Original LR
 *
 * Saves r27-r31 plus DAR into PC_TEMPSAVE, computes the exception
 * vector into SPRG3 from the vector-area return address in LR, and
 * falls through to s_trap/k_trap with r28 = old LR, r29 = old CR.
 */

	.globl	CNAME(generictrap)
generictrap:
	/* Save R1 for computing the exception vector */
	mtsprg3 %r1

	/* Save interesting registers */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	mfsprg1	%r1			/* restore SP, in case of branch */
	mfsprg2	%r28			/* save LR */
	mfcr	%r29			/* save CR */

	/* Compute the exception vector from the link register */
	mfsprg3 %r31
	ori	%r31,%r31,0xff00
	mflr	%r30
	addi	%r30,%r30,-4 /* The branch instruction, not the next */
	and	%r30,%r30,%r31
	mtsprg3	%r30

	/* Test whether we already had PR set */
	mfsrr1	%r31
	mtcr	%r31

s_trap:
	bf	17,k_trap		/* branch if PSL_PR is false */
	GET_CPUINFO(%r1)
u_trap:
	ld	%r1,PC_CURPCB(%r1)	/* switch to the kernel stack */
	mr	%r27,%r28		/* Save LR, r29 */
	mtsprg2	%r29
	bl	restore_kernsrs		/* enable kernel mapping */
	mfsprg2	%r29
	mr	%r28,%r27
721
/*
 * Now the common trap catching code.
 *
 * Builds the trapframe, dispatches to powerpc_interrupt(), then (for
 * returns to user mode) loops delivering ASTs with interrupts enabled
 * before unwinding the frame and executing rfid.
 */
k_trap:
	FRAME_SETUP(PC_TEMPSAVE)
/* Call C interrupt dispatcher: */
trapagain:
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48		/* pass pointer to the trapframe */
	bl	CNAME(powerpc_interrupt)
	nop

	.globl	CNAME(trapexit)	/* backtrace code sentinel */
CNAME(trapexit):
/* Disable interrupts: */
	mfmsr	%r3
	andi.	%r3,%r3,~PSL_EE@l
	mtmsr	%r3
	isync
/* Test AST pending: */
	ld	%r5,FRAME_SRR1+48(%r1)
	mtcr	%r5
	bf	17,1f			/* branch if PSL_PR is false */

	GET_CPUINFO(%r3)		/* get per-CPU pointer */
	lwz	%r4, TD_FLAGS(%r13)	/* get thread flags value */
	lis	%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@h
	ori	%r5,%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@l
	and.	%r4,%r4,%r5
	beq	1f
	mfmsr	%r3			/* re-enable interrupts */
	ori	%r3,%r3,PSL_EE@l
	mtmsr	%r3
	isync
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48
	bl	CNAME(ast)
	nop
	.globl	CNAME(asttrapexit)	/* backtrace code sentinel #2 */
CNAME(asttrapexit):
	b	trapexit		/* test ast ret value ? */
1:
	FRAME_LEAVE(PC_TEMPSAVE)
	rfid
766
#if defined(KDB)
/*
 * Deliberate entry to dbtrap: fakes a breakpoint trap (EXC_BPT) by
 * saving MSR into SRR1, the caller's address into SRR0 and r27-r31 into
 * PC_DBSAVE, then falls through to dbtrap below.
 */
ASENTRY_NOPROF(breakpoint)
	mtsprg1	%r1
	mfmsr	%r3
	mtsrr1	%r3			/* fake SRR1 from the current MSR */
	andi.	%r3,%r3,~(PSL_EE|PSL_ME)@l
	mtmsr	%r3			/* disable interrupts */
	isync
	GET_CPUINFO(%r3)
	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r3)
	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r3)
	std	%r29,(PC_DBSAVE+CPUSAVE_R29)(%r3)
	std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r3)
	std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r3)
	mflr	%r28
	li	%r29,EXC_BPT		/* pretend the vector was EXC_BPT */
	mtlr	%r29
	mfcr	%r29
	mtsrr0	%r28			/* fake SRR0 from the caller's LR */

/*
 * Now the kdb trap catching code.
 */
dbtrap:
	/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
	mflr	%r1
	andi.	%r1,%r1,0xff00
	mtsprg3	%r1

	ld	%r1,TRAP_TOCBASE(0)		/* get new SP */
	ld	%r1,TOC_REF(tmpstk)(%r1)	/* debugger runs on tmpstk */
	addi	%r1,%r1,(TMPSTKSZ-48)

	FRAME_SETUP(PC_DBSAVE)
/* Call C trap code: */
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48
	bl	CNAME(db_trap_glue)
	nop
	or.	%r3,%r3,%r3		/* nonzero: KDB handled the trap */
	bne	dbleave
/* This wasn't for KDB, so switch to real trap: */
	ld	%r3,FRAME_EXC+48(%r1)	/* save exception */
	GET_CPUINFO(%r4)
	std	%r3,(PC_DBSAVE+CPUSAVE_R31)(%r4)
	FRAME_LEAVE(PC_DBSAVE)
	mtsprg1	%r1			/* prepare for entrance to realtrap */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mflr	%r28
	mfcr	%r29
	ld	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)
	mtsprg3	%r31			/* SPRG3 was clobbered by FRAME_LEAVE */
	mfsprg1	%r1
	b	realtrap
dbleave:
	FRAME_LEAVE(PC_DBSAVE)
	rfid
832
/*
 * In case of KDB we want a separate trap catcher for it
 *
 * Installed in the trace/breakpoint vectors: kernel-mode traps go to
 * dbtrap (the debugger); user-mode traps are forwarded to generictrap
 * like any other exception.
 */
	.globl	CNAME(dblow),CNAME(dbend)
	.p2align 3
CNAME(dblow):
	mtsprg1	%r1			/* save SP */
	mtsprg2	%r29			/* save r29 */
	mfcr	%r29			/* save CR in r29 */
	mfsrr1	%r1
	mtcr	%r1
	bf	17,1f			/* branch if privileged */

	/* Unprivileged case */
	mtcr	%r29			/* put the condition register back */
	mfsprg2	%r29			/* ... and r29 */
	mflr	%r1			/* save LR */
	mtsprg2 %r1			/* And then in SPRG2 */

	ld	%r1, TRAP_GENTRAP(0)	/* Get branch address */
	mtlr	%r1
	li	%r1, 0	 		/* How to get the vector from LR */
	blrl				/* Branch to generictrap */

1:
	GET_CPUINFO(%r1)
	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r1)	/* free r27 */
	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r1)	/* free r28 */
	mfsprg2	%r28				/* r29 holds cr...  */
	std	%r28,(PC_DBSAVE+CPUSAVE_R29)(%r1)	/* free r29 */
	std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r1)	/* free r30 */
	std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)	/* free r31 */
	mflr	%r28					/* save LR */
	nop						/* alignment */
	bl	9f					/* Begin branch */
	.llong	dbtrap
9:	mflr	%r1
	ld	%r1,0(%r1)
	mtlr	%r1
	blrl				/* Branch to dbtrap */
CNAME(dbend):
#endif /* KDB */
875