/* $FreeBSD$ */
/* $NetBSD: trap_subr.S,v 1.20 2002/04/22 23:20:08 kleink Exp $	*/

/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NOTICE: This is not a standalone file.  To use it, #include it in
 * your port's locore.S, like so:
 *
 *	#include <powerpc/aim/trap_subr.S>
 */

/* Locate the per-CPU data structure */
#define GET_CPUINFO(r)  \
        mfsprg0  r
#define GET_TOCBASE(r)  \
	lis	r,DMAP_BASE_ADDRESS@highesta;	/* To real-mode alias/dmap */ \
	sldi	r,r,32;							\
	ori	r,r,TRAP_TOCBASE;	/* Magic address for TOC */	\
	ld	r,0(r)
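
/*
 * GET_TOCBASE loads the kernel TOC pointer from the doubleword kept at
 * the magic low-memory address TRAP_TOCBASE, addressed through the
 * direct map (which also serves as the real-mode alias), so the lookup
 * works whether translation is currently on or off.
 */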

/*
 * Restore SRs for a pmap
 *
 * Requires that r28-r31 be scratch, with r28 initialized to the SLB cache
 */

/*
 * User SRs are loaded through a pointer to the current pmap.
 */
restore_usersrs:
	GET_CPUINFO(%r28)
	ld	%r28,PC_USERSLB(%r28)
	cmpdi	%r28, 0			/* If user SLB pointer NULL, exit */
	beqlr

	li	%r29, 0			/* Set the counter to zero */

	slbia
	slbmfee	%r31,%r29
	clrrdi	%r31,%r31,28
	slbie	%r31
1:	ld	%r31, 0(%r28)		/* Load SLB entry pointer */
	cmpdi	%r31, 0			/* If NULL, stop */
	beqlr

	ld	%r30, 0(%r31)		/* Load SLBV */
	ld	%r31, 8(%r31)		/* Load SLBE */
	or	%r31, %r31, %r29	/*  Set SLBE slot */
	slbmte	%r30, %r31		/* Install SLB entry */

	addi	%r28, %r28, 8		/* Advance pointer */
	addi	%r29, %r29, 1
	b	1b			/* Repeat */
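
/*
 * The loop above walks the NULL-terminated PC_USERSLB array of pointers
 * to cached SLB entries, installing each entry in the slot given by its
 * index.  Roughly (a sketch, not the authoritative layout):
 *
 *	for (i = 0; (e = userslb[i]) != NULL; i++)
 *		slbmte(e->slbv, e->slbe | i);
 *
 * The slbia/slbmfee/slbie sequence beforehand clears the existing SLB,
 * including entry 0, which slbia alone may leave valid.
 */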

/*
 * Kernel SRs are loaded directly from the PCPU fields
 */
restore_kernsrs:
	GET_CPUINFO(%r28)
	addi	%r28,%r28,PC_KERNSLB
	ld	%r29,16(%r28)		/* One past USER_SLB_SLOT */
	cmpdi	%r29,0
	beqlr				/* If first kernel entry is invalid,
					 * SLBs not in use, so exit early */

	/* Otherwise, set up SLBs */
	li	%r29, 0			/* Set the counter to zero */

	slbia
	slbmfee	%r31,%r29
	clrrdi	%r31,%r31,28
	slbie	%r31
1:	cmpdi	%r29, USER_SLB_SLOT	/* Skip the user slot */
	beq-	2f

	ld	%r31, 8(%r28)		/* Load SLBE */
	cmpdi	%r31, 0			/* If SLBE is not valid, stop */
	beqlr
	ld	%r30, 0(%r28)		/* Load SLBV  */
	slbmte	%r30, %r31		/* Install SLB entry */

2:	addi	%r28, %r28, 16		/* Advance pointer */
	addi	%r29, %r29, 1
	cmpdi	%r29, 64		/* Repeat if we are not at the end */
	blt	1b
	blr
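
/*
 * restore_kernsrs walks the 64-entry PC_KERNSLB array in place (16 bytes
 * per entry), skipping USER_SLB_SLOT and stopping at the first entry
 * whose SLBE word is zero, installing the rest with slbmte.
 */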

/*
 * FRAME_SETUP assumes:
 *	SPRG1		SP (1)
 * 	SPRG3		trap type
 *	savearea	r27-r31,DAR,DSISR   (DAR & DSISR only for DSI traps)
 *	r28		LR
 *	r29		CR
 *	r30		scratch
 *	r31		scratch
 *	r1		kernel stack
 *	SRR0/1		as at start of trap
 *
 * NOTE: SPRG1 is never used while the MMU is on, making it safe to reuse
 * in any real-mode fault handler, including those handling double faults.
 */
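/*
 * The FRAME_* offsets below are taken relative to the trapframe, which
 * starts 48 bytes above the new stack pointer (the size of the ABI
 * stack-frame header used by this kernel).  The extra 288 bytes
 * subtracted in the stdu presumably skip the 288-byte register save
 * area (red zone) the 64-bit ELF ABI allows below the interrupted
 * stack pointer.
 */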
#define	FRAME_SETUP(savearea)						\
/* Have to enable translation to allow access of kernel stack: */	\
	GET_CPUINFO(%r31);						\
	mfsrr0	%r30;							\
	std	%r30,(savearea+CPUSAVE_SRR0)(%r31);	/* save SRR0 */	\
	mfsrr1	%r30;							\
	std	%r30,(savearea+CPUSAVE_SRR1)(%r31);	/* save SRR1 */	\
	mfsprg1	%r31;			/* get saved SP (clears SPRG1) */ \
	mfmsr	%r30;							\
	ori	%r30,%r30,(PSL_DR|PSL_IR|PSL_RI)@l; /* relocation on */	\
	mtmsr	%r30;			/* stack can now be accessed */	\
	isync;								\
	stdu	%r31,-(FRAMELEN+288)(%r1); /* save it in the callframe */ \
	std	%r0, FRAME_0+48(%r1);	/* save r0 in the trapframe */	\
	std	%r31,FRAME_1+48(%r1);	/* save SP   "      "       */	\
	std	%r2, FRAME_2+48(%r1);	/* save r2   "      "       */	\
	std	%r28,FRAME_LR+48(%r1);	/* save LR   "      "       */	\
	std	%r29,FRAME_CR+48(%r1);	/* save CR   "      "       */	\
	GET_CPUINFO(%r2);						\
	ld	%r27,(savearea+CPUSAVE_R27)(%r2); /* get saved r27 */	\
	ld	%r28,(savearea+CPUSAVE_R28)(%r2); /* get saved r28 */	\
	ld	%r29,(savearea+CPUSAVE_R29)(%r2); /* get saved r29 */	\
	ld	%r30,(savearea+CPUSAVE_R30)(%r2); /* get saved r30 */	\
	ld	%r31,(savearea+CPUSAVE_R31)(%r2); /* get saved r31 */	\
	std	%r3,  FRAME_3+48(%r1);	/* save r3-r31 */		\
	std	%r4,  FRAME_4+48(%r1);					\
	std	%r5,  FRAME_5+48(%r1);					\
	std	%r6,  FRAME_6+48(%r1);					\
	std	%r7,  FRAME_7+48(%r1);					\
	std	%r8,  FRAME_8+48(%r1);					\
	std	%r9,  FRAME_9+48(%r1);					\
	std	%r10, FRAME_10+48(%r1);					\
	std	%r11, FRAME_11+48(%r1);					\
	std	%r12, FRAME_12+48(%r1);					\
	std	%r13, FRAME_13+48(%r1);					\
	std	%r14, FRAME_14+48(%r1);					\
	std	%r15, FRAME_15+48(%r1);					\
	std	%r16, FRAME_16+48(%r1);					\
	std	%r17, FRAME_17+48(%r1);					\
	std	%r18, FRAME_18+48(%r1);					\
	std	%r19, FRAME_19+48(%r1);					\
	std	%r20, FRAME_20+48(%r1);					\
	std	%r21, FRAME_21+48(%r1);					\
	std	%r22, FRAME_22+48(%r1);					\
	std	%r23, FRAME_23+48(%r1);					\
	std	%r24, FRAME_24+48(%r1);					\
	std	%r25, FRAME_25+48(%r1);					\
	std	%r26, FRAME_26+48(%r1);					\
	std	%r27, FRAME_27+48(%r1);					\
	std	%r28, FRAME_28+48(%r1);					\
	std	%r29, FRAME_29+48(%r1);					\
	std	%r30, FRAME_30+48(%r1);					\
	std	%r31, FRAME_31+48(%r1);					\
	ld	%r28,(savearea+CPUSAVE_AIM_DAR)(%r2);  /* saved DAR */	\
	ld	%r29,(savearea+CPUSAVE_AIM_DSISR)(%r2);/* saved DSISR */\
	ld	%r30,(savearea+CPUSAVE_SRR0)(%r2); /* saved SRR0 */	\
	ld	%r31,(savearea+CPUSAVE_SRR1)(%r2); /* saved SRR1 */	\
	mfxer	%r3;							\
	mfctr	%r4;							\
	mfsprg3	%r5;							\
	std	%r3, FRAME_XER+48(1);	/* save xer/ctr/exc */		\
	std	%r4, FRAME_CTR+48(1);					\
	std	%r5, FRAME_EXC+48(1);					\
	std	%r28,FRAME_AIM_DAR+48(1);				\
	std	%r29,FRAME_AIM_DSISR+48(1); /* save dsisr/srr0/srr1 */	\
	std	%r30,FRAME_SRR0+48(1);					\
	std	%r31,FRAME_SRR1+48(1);					\
	ld	%r13,PC_CURTHREAD(%r2)	/* set kernel curthread */

#define	FRAME_LEAVE(savearea)						\
/* Disable exceptions: */						\
	mfmsr	%r2;							\
	andi.	%r2,%r2,~PSL_EE@l;					\
	mtmsr	%r2;							\
	isync;								\
/* Now restore regs: */							\
	ld	%r2,FRAME_SRR0+48(%r1);					\
	ld	%r3,FRAME_SRR1+48(%r1);					\
	ld	%r4,FRAME_CTR+48(%r1);					\
	ld	%r5,FRAME_XER+48(%r1);					\
	ld	%r6,FRAME_LR+48(%r1);					\
	GET_CPUINFO(%r7);						\
	std	%r2,(savearea+CPUSAVE_SRR0)(%r7); /* save SRR0 */	\
	std	%r3,(savearea+CPUSAVE_SRR1)(%r7); /* save SRR1 */	\
	ld	%r7,FRAME_CR+48(%r1);					\
	mtctr	%r4;							\
	mtxer	%r5;							\
	mtlr	%r6;							\
	mtsprg2	%r7;			/* save cr */			\
	ld	%r31,FRAME_31+48(%r1);   /* restore r0-31 */		\
	ld	%r30,FRAME_30+48(%r1);					\
	ld	%r29,FRAME_29+48(%r1);					\
	ld	%r28,FRAME_28+48(%r1);					\
	ld	%r27,FRAME_27+48(%r1);					\
	ld	%r26,FRAME_26+48(%r1);					\
	ld	%r25,FRAME_25+48(%r1);					\
	ld	%r24,FRAME_24+48(%r1);					\
	ld	%r23,FRAME_23+48(%r1);					\
	ld	%r22,FRAME_22+48(%r1);					\
	ld	%r21,FRAME_21+48(%r1);					\
	ld	%r20,FRAME_20+48(%r1);					\
	ld	%r19,FRAME_19+48(%r1);					\
	ld	%r18,FRAME_18+48(%r1);					\
	ld	%r17,FRAME_17+48(%r1);					\
	ld	%r16,FRAME_16+48(%r1);					\
	ld	%r15,FRAME_15+48(%r1);					\
	ld	%r14,FRAME_14+48(%r1);					\
	ld	%r13,FRAME_13+48(%r1);					\
	ld	%r12,FRAME_12+48(%r1);					\
	ld	%r11,FRAME_11+48(%r1);					\
	ld	%r10,FRAME_10+48(%r1);					\
	ld	%r9, FRAME_9+48(%r1);					\
	ld	%r8, FRAME_8+48(%r1);					\
	ld	%r7, FRAME_7+48(%r1);					\
	ld	%r6, FRAME_6+48(%r1);					\
	ld	%r5, FRAME_5+48(%r1);					\
	ld	%r4, FRAME_4+48(%r1);					\
	ld	%r3, FRAME_3+48(%r1);					\
	ld	%r2, FRAME_2+48(%r1);					\
	ld	%r0, FRAME_0+48(%r1);					\
	ld	%r1, FRAME_1+48(%r1);					\
/* Can't touch %r1 from here on */					\
	mtsprg3	%r3;			/* save r3 */			\
/* Disable translation, machine check and recoverability: */		\
	mfmsr	%r3;							\
	andi.	%r3,%r3,~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l;		\
	mtmsr	%r3;							\
	isync;								\
/* Decide whether we return to user mode: */				\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR1)(%r3);			\
	mtcr	%r3;							\
	bf	17,1f;			/* branch if PSL_PR is false */	\
/* Restore user SRs */							\
	GET_CPUINFO(%r3);						\
	std	%r27,(savearea+CPUSAVE_R27)(%r3);			\
	std	%r28,(savearea+CPUSAVE_R28)(%r3);			\
	std	%r29,(savearea+CPUSAVE_R29)(%r3);			\
	std	%r30,(savearea+CPUSAVE_R30)(%r3);			\
	std	%r31,(savearea+CPUSAVE_R31)(%r3);			\
	mflr	%r27;			/* preserve LR */		\
	bl	restore_usersrs;	/* uses r28-r31 */		\
	mtlr	%r27;							\
	ld	%r31,(savearea+CPUSAVE_R31)(%r3);			\
	ld	%r30,(savearea+CPUSAVE_R30)(%r3);			\
	ld	%r29,(savearea+CPUSAVE_R29)(%r3);			\
	ld	%r28,(savearea+CPUSAVE_R28)(%r3);			\
	ld	%r27,(savearea+CPUSAVE_R27)(%r3);			\
1:	mfsprg2	%r3;			/* restore cr */		\
	mtcr	%r3;							\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR0)(%r3); /* restore srr0 */	\
	mtsrr0	%r3;							\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR1)(%r3); /* restore srr1 */	\
	mtsrr1	%r3;							\
	mfsprg3	%r3			/* restore r3 */
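
/*
 * FRAME_LEAVE re-reads SRR0/SRR1 from the savearea after translation has
 * been turned off again, parks CR in SPRG2 and r3 in SPRG3 while the
 * remaining registers are reloaded, and, when returning to user mode
 * (PSL_PR set in the saved SRR1), reinstalls the user SLB entries via
 * restore_usersrs before the caller issues the final rfid.
 */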

#ifdef KDTRACE_HOOKS
	.data
	.globl	dtrace_invop_calltrap_addr
	.align	8
	.type	dtrace_invop_calltrap_addr, @object
        .size	dtrace_invop_calltrap_addr, 8
dtrace_invop_calltrap_addr:
	.word	0
	.word	0

	.text
#endif

/*
 * Processor reset exception handler. These are typically the first
 * instructions the processor executes after a software reset. We do
 * this in two parts so that we are not still hanging around in the
 * trap handling region once the MMU is turned on.
 */
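/*
 * rstcode uses the "bl 1f; .llong addr; 1: mflr; ld; mtlr" pattern
 * (twice, for the wakeup and reset handlers): the bl leaves the address
 * of the following .llong in LR, so the code can pick up a full 64-bit
 * handler address position-independently while still running in real
 * mode.
 */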
	.globl	CNAME(rstcode), CNAME(rstcodeend), CNAME(cpu_reset_handler)
	.globl	CNAME(cpu_wakeup_handler)
	.p2align 3
CNAME(rstcode):
	/*
	 * Check whether this is a software reset or the processor is
	 * waking up from power-saving mode.  It is a software reset
	 * when SRR1 bits 46:47 == 0b00.
	 */
	mfsrr1	%r9			/* Load SRR1 into r9 */
	andis.	%r9,%r9,0x3		/* Isolate SRR1 bits 46:47 */
	beq	2f			/* Branch if software reset */
	bl	1f
	.llong	cpu_wakeup_handler

	/* It is software reset */

	/* Explicitly set MSR[SF] */
2:	mfmsr	%r9
	li	%r8,1
	insrdi	%r9,%r8,1,0
	mtmsrd	%r9
	isync

	bl	1f
	.llong	cpu_reset_handler /* Make sure to maintain 8-byte alignment */

1:	mflr	%r9
	ld	%r9,0(%r9)
	mtlr	%r9

	blr
CNAME(rstcodeend):

cpu_reset_handler:
	GET_TOCBASE(%r2)

	addis	%r1,%r2,TOC_REF(tmpstk)@ha
	ld	%r1,TOC_REF(tmpstk)@l(%r1)	/* get new SP */
	addi	%r1,%r1,(TMPSTKSZ-48)

	bl	CNAME(cpudep_ap_early_bootstrap) /* Set PCPU */
	nop
	lis	%r3,1@l
	bl	CNAME(pmap_cpu_bootstrap)	/* Turn on virtual memory */
	nop
	bl	CNAME(cpudep_ap_bootstrap)	/* Set up PCPU and stack */
	nop
	mr	%r1,%r3				/* Use new stack */
	bl	CNAME(cpudep_ap_setup)
	nop
	GET_CPUINFO(%r5)
	ld	%r3,(PC_RESTORE)(%r5)
	cmpldi	%cr0,%r3,0
	beq	%cr0,2f
	nop
	li	%r4,1
	bl	CNAME(longjmp)
	nop
2:
#ifdef SMP
	bl	CNAME(machdep_ap_bootstrap)	/* And away! */
	nop
#endif

	/* Should not be reached */
9:
	b	9b

cpu_wakeup_handler:
	GET_TOCBASE(%r2)

	/* Check for a false wakeup due to a badly set SRR1 (e.g. by OPAL) */
	addis	%r3,%r2,TOC_REF(can_wakeup)@ha
	ld	%r3,TOC_REF(can_wakeup)@l(%r3)
	ld	%r3,0(%r3)
	cmpdi	%r3,0
	beq	cpu_reset_handler

	/* Turn on MMU after return from interrupt */
	mfsrr1	%r3
	ori	%r3,%r3,(PSL_IR | PSL_DR)
	mtsrr1	%r3

	/* Turn on MMU (needed to access PCB) */
	mfmsr	%r3
	ori	%r3,%r3,(PSL_IR | PSL_DR)
	mtmsr	%r3
	isync

	mfsprg0	%r3

	ld	%r3,PC_CURTHREAD(%r3)	/* Get current thread */
	ld	%r3,TD_PCB(%r3)		/* Get PCB of current thread */
	ld	%r12,PCB_CONTEXT(%r3)	/* Load the non-volatile GP regs. */
	ld	%r13,PCB_CONTEXT+1*8(%r3)
	ld	%r14,PCB_CONTEXT+2*8(%r3)
	ld	%r15,PCB_CONTEXT+3*8(%r3)
	ld	%r16,PCB_CONTEXT+4*8(%r3)
	ld	%r17,PCB_CONTEXT+5*8(%r3)
	ld	%r18,PCB_CONTEXT+6*8(%r3)
	ld	%r19,PCB_CONTEXT+7*8(%r3)
	ld	%r20,PCB_CONTEXT+8*8(%r3)
	ld	%r21,PCB_CONTEXT+9*8(%r3)
	ld	%r22,PCB_CONTEXT+10*8(%r3)
	ld	%r23,PCB_CONTEXT+11*8(%r3)
	ld	%r24,PCB_CONTEXT+12*8(%r3)
	ld	%r25,PCB_CONTEXT+13*8(%r3)
	ld	%r26,PCB_CONTEXT+14*8(%r3)
	ld	%r27,PCB_CONTEXT+15*8(%r3)
	ld	%r28,PCB_CONTEXT+16*8(%r3)
	ld	%r29,PCB_CONTEXT+17*8(%r3)
	ld	%r30,PCB_CONTEXT+18*8(%r3)
	ld	%r31,PCB_CONTEXT+19*8(%r3)
	ld	%r5,PCB_CR(%r3)		/* Load the condition register */
	mtcr	%r5
	ld	%r5,PCB_LR(%r3)		/* Load the link register */
	mtsrr0	%r5
	ld	%r1,PCB_SP(%r3)		/* Load the stack pointer */
	ld	%r2,PCB_TOC(%r3)	/* Load the TOC pointer */

	rfid
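
/*
 * The wakeup path restores the non-volatile GPRs, CR, LR (via SRR0),
 * SP and TOC from the current thread's PCB and returns through rfid,
 * which also re-enables translation via the SRR1 set up above.
 */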

/*
 * This code gets copied to all the trap vectors
 * (except ISI/DSI, ALI, and the interrupts). Has to fit in 8 instructions!
 */

	.globl	CNAME(trapcode),CNAME(trapcodeend)
	.p2align 3
CNAME(trapcode):
	mtsprg1	%r1			/* save SP */
	mflr	%r1			/* Save the old LR in r1 */
	mtsprg2 %r1			/* And then in SPRG2 */
	ld	%r1,TRAP_GENTRAP(0)
	mtlr	%r1
	li	%r1, 0xe0		/* How to get the vector from LR */
	blrl				/* Branch to generictrap */
CNAME(trapcodeend):
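
/*
 * generictrap recovers the vector address from LR.  The blrl above
 * leaves LR pointing just past itself, 0x1c bytes into the vector;
 * generictrap subtracts 4 and masks with (0xff00 | 0xe0) = 0xffe0,
 * which rounds 0x18 down to 0 and yields the vector base:
 *
 *	vector = (LR - 4) & 0xffe0	(e.g. 0x700 for a program check)
 *
 * The value loaded into r1 here is that low mask.
 */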

/* Same thing for traps setting HSRR0/HSRR1 */
	.globl	CNAME(hypertrapcode),CNAME(hypertrapcodeend)
	.p2align 3
CNAME(hypertrapcode):
	mtsprg1	%r1			/* save SP */
	mflr	%r1			/* Save the old LR in r1 */
	mtsprg2 %r1			/* And then in SPRG2 */
	ld	%r1,TRAP_GENTRAP(0)
	addi	%r1,%r1,(generichypertrap-generictrap)
	mtlr	%r1
	li	%r1, 0xe0		/* How to get the vector from LR */
	blrl				/* Branch to generictrap */
CNAME(hypertrapcodeend):
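
/*
 * generichypertrap simply copies HSRR0/HSRR1 into SRR0/SRR1 and then
 * falls through into generictrap, so the rest of the trap path is shared.
 */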

/*
 * For SLB misses: do special things for the kernel
 *
 * Note: SPRG1 is always safe to overwrite any time the MMU is on, which is
 * the only time this can be called.
 */
	.globl	CNAME(slbtrap),CNAME(slbtrapend)
	.p2align 3
CNAME(slbtrap):
	mtsprg1	%r1			/* save SP */
	GET_CPUINFO(%r1)
	std	%r2,(PC_SLBSAVE+16)(%r1)
	mfcr	%r2			/* save CR */
	std	%r2,(PC_SLBSAVE+104)(%r1)
	mfsrr1	%r2			/* test kernel mode */
	mtcr	%r2
	bf	17,2f			/* branch if PSL_PR is false */
	/* User mode */
	ld	%r2,(PC_SLBSAVE+104)(%r1) /* Restore CR */
	mtcr	%r2
	ld	%r2,(PC_SLBSAVE+16)(%r1) /* Restore R2 */
	mflr	%r1			/* Save the old LR in r1 */
	mtsprg2 %r1			/* And then in SPRG2 */
					/* 52 bytes so far */
	bl	1f
	.llong	generictrap
1:	mflr	%r1
	ld	%r1,0(%r1)
	mtlr	%r1
	li	%r1, 0x80		/* How to get the vector from LR */
	blrl				/* Branch to generictrap */
					/* 84 bytes */
2:	mflr	%r2			/* Save the old LR in r2 */
	nop
	bl	3f			/* Begin dance to jump to kern_slbtrap */
	.llong	kern_slbtrap
3:	mflr	%r1
	ld	%r1,0(%r1)
	mtlr	%r1
	GET_CPUINFO(%r1)
	blrl				/* 124 bytes -- 4 to spare */
CNAME(slbtrapend):
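
/*
 * User-mode SLB misses are funneled into generictrap with the 0x80 low
 * mask (the SLB vectors are 0x380/0x480); kernel-mode misses branch to
 * kern_slbtrap with the PCPU pointer already in r1.
 */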

kern_slbtrap:
	std	%r2,(PC_SLBSAVE+136)(%r1) /* old LR */
	std	%r3,(PC_SLBSAVE+24)(%r1) /* save R3 */

	/* Check if this needs to be handled as a regular trap (userseg miss) */
	mflr	%r2
	andi.	%r2,%r2,0xff80
	cmpwi	%r2,0x380
	bne	1f
	mfdar	%r2
	b	2f
1:	mfsrr0	%r2
2:	/* r2 now contains the fault address */
	lis	%r3,SEGMENT_MASK@highesta
	ori	%r3,%r3,SEGMENT_MASK@highera
	sldi	%r3,%r3,32
	oris	%r3,%r3,SEGMENT_MASK@ha
	ori	%r3,%r3,SEGMENT_MASK@l
	and	%r2,%r2,%r3	/* R2 = segment base address */
	lis	%r3,USER_ADDR@highesta
	ori	%r3,%r3,USER_ADDR@highera
	sldi	%r3,%r3,32
	oris	%r3,%r3,USER_ADDR@ha
	ori	%r3,%r3,USER_ADDR@l
	cmpd	%r2,%r3		/* Compare fault base to USER_ADDR */
	bne	3f

	/* User seg miss, handle as a regular trap */
	ld	%r2,(PC_SLBSAVE+104)(%r1) /* Restore CR */
	mtcr	%r2
	ld	%r2,(PC_SLBSAVE+16)(%r1) /* Restore R2,R3 */
	ld	%r3,(PC_SLBSAVE+24)(%r1)
	ld	%r1,(PC_SLBSAVE+136)(%r1) /* Save the old LR in r1 */
	mtsprg2 %r1			/* And then in SPRG2 */
	li	%r1, 0x80		/* How to get the vector from LR */
	b	generictrap		/* Retain old LR using b */

3:	/* Real kernel SLB miss */
	std	%r0,(PC_SLBSAVE+0)(%r1)	/* free all volatile regs */
	mfsprg1	%r2			/* Old R1 */
	std	%r2,(PC_SLBSAVE+8)(%r1)
	/* R2,R3 already saved */
	std	%r4,(PC_SLBSAVE+32)(%r1)
	std	%r5,(PC_SLBSAVE+40)(%r1)
	std	%r6,(PC_SLBSAVE+48)(%r1)
	std	%r7,(PC_SLBSAVE+56)(%r1)
	std	%r8,(PC_SLBSAVE+64)(%r1)
	std	%r9,(PC_SLBSAVE+72)(%r1)
	std	%r10,(PC_SLBSAVE+80)(%r1)
	std	%r11,(PC_SLBSAVE+88)(%r1)
	std	%r12,(PC_SLBSAVE+96)(%r1)
	/* CR already saved */
	mfxer	%r2			/* save XER */
	std	%r2,(PC_SLBSAVE+112)(%r1)
	mflr	%r2			/* save LR (SP already saved) */
	std	%r2,(PC_SLBSAVE+120)(%r1)
	mfctr	%r2			/* save CTR */
	std	%r2,(PC_SLBSAVE+128)(%r1)

	/* Call handler */
	addi	%r1,%r1,PC_SLBSTACK-48+1024
	li	%r2,~15
	and	%r1,%r1,%r2
	GET_TOCBASE(%r2)
	mflr	%r3
	andi.	%r3,%r3,0xff80
	mfdar	%r4
	mfsrr0	%r5
	bl	handle_kernel_slb_spill
	nop

	/* Save r28-31, restore r4-r12 */
	GET_CPUINFO(%r1)
	ld	%r4,(PC_SLBSAVE+32)(%r1)
	ld	%r5,(PC_SLBSAVE+40)(%r1)
	ld	%r6,(PC_SLBSAVE+48)(%r1)
	ld	%r7,(PC_SLBSAVE+56)(%r1)
	ld	%r8,(PC_SLBSAVE+64)(%r1)
	ld	%r9,(PC_SLBSAVE+72)(%r1)
	ld	%r10,(PC_SLBSAVE+80)(%r1)
	ld	%r11,(PC_SLBSAVE+88)(%r1)
	ld	%r12,(PC_SLBSAVE+96)(%r1)
	std	%r28,(PC_SLBSAVE+64)(%r1)
	std	%r29,(PC_SLBSAVE+72)(%r1)
	std	%r30,(PC_SLBSAVE+80)(%r1)
	std	%r31,(PC_SLBSAVE+88)(%r1)

	/* Restore kernel mapping */
	bl	restore_kernsrs

	/* Restore remaining registers */
	ld	%r28,(PC_SLBSAVE+64)(%r1)
	ld	%r29,(PC_SLBSAVE+72)(%r1)
	ld	%r30,(PC_SLBSAVE+80)(%r1)
	ld	%r31,(PC_SLBSAVE+88)(%r1)

	ld	%r2,(PC_SLBSAVE+104)(%r1)
	mtcr	%r2
	ld	%r2,(PC_SLBSAVE+112)(%r1)
	mtxer	%r2
	ld	%r2,(PC_SLBSAVE+120)(%r1)
	mtlr	%r2
	ld	%r2,(PC_SLBSAVE+128)(%r1)
	mtctr	%r2
	ld	%r2,(PC_SLBSAVE+136)(%r1)
	mtlr	%r2

	/* Restore r0-r3 */
	ld	%r0,(PC_SLBSAVE+0)(%r1)
	ld	%r2,(PC_SLBSAVE+16)(%r1)
	ld	%r3,(PC_SLBSAVE+24)(%r1)
	mfsprg1	%r1

	/* Back to whatever we were doing */
	rfid
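
/*
 * Kernel SLB miss path: all volatile state is stashed in PC_SLBSAVE,
 * the per-CPU PC_SLBSTACK area is used as a temporary stack, and
 * handle_kernel_slb_spill() is called with the masked trap vector, the
 * DAR and SRR0 as arguments before the saved context is reloaded and
 * the rfid returns to the faulting instruction.
 */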

/*
 * For ALI: has to save DSISR and DAR
 */
	.globl	CNAME(alitrap),CNAME(aliend)
CNAME(alitrap):
	mtsprg1	%r1			/* save SP */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	mfdsisr	%r31
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)
	mfsprg1	%r1			/* restore SP, in case of branch */
	mflr	%r28			/* save LR */
	mfcr	%r29			/* save CR */

	/* Begin dance to branch to s_trap in a bit */
	b	1f
	.p2align 3
1:	nop
	bl	1f
	.llong	s_trap
1:	mflr	%r31
	ld	%r31,0(%r31)
	mtlr	%r31

	/* Put our exception vector in SPRG3 */
	li	%r31, EXC_ALI
	mtsprg3	%r31

	/* Test whether we already had PR set */
	mfsrr1	%r31
	mtcr	%r31
	blrl
CNAME(aliend):

/*
 * Similar to the above, for DSI.
 * Has to handle standard pagetable spills.
 */
	.globl	CNAME(dsitrap),CNAME(dsiend)
	.p2align 3
CNAME(dsitrap):
	mtsprg1	%r1			/* save SP */
	GET_CPUINFO(%r1)
	std	%r27,(PC_DISISAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
	std	%r28,(PC_DISISAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_DISISAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
	mfcr	%r29			/* save CR */
	mfxer	%r30			/* save XER */
	mtsprg2	%r30			/* in SPRG2 */
	mfsrr1	%r31			/* test kernel mode */
	mtcr	%r31
	mflr	%r28			/* save LR (SP already saved) */
	bl	1f			/* Begin branching to disitrap */
	.llong	disitrap
1:	mflr	%r1
	ld	%r1,0(%r1)
	mtlr	%r1
	blrl				/* Branch to disitrap */
CNAME(dsiend):

/*
 * Preamble code for DSI/ISI traps
 */
disitrap:
	/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
	mflr	%r1
	andi.	%r1,%r1,0xff00
	mtsprg3	%r1

	GET_CPUINFO(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R27)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
	ld	%r30,(PC_DISISAVE+CPUSAVE_R28)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R29)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	ld	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	mfdsisr	%r31
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)

#ifdef KDB
	/* Try to detect a kernel stack overflow */
	mfsrr1	%r31
	mtcr	%r31
	bt	17,realtrap		/* branch if user mode */
	mfsprg1	%r31			/* get old SP */
	clrrdi	%r31,%r31,12		/* Round SP down to nearest page */
	sub.	%r30,%r31,%r30		/* SP - DAR */
	bge	1f
	neg	%r30,%r30		/* modulo value */
1:	cmpldi	%cr0,%r30,4096		/* is DAR within a page of SP? */
	bge	%cr0,realtrap		/* no, too far away. */

	/* Now convert this DSI into a DDB trap.  */
	GET_CPUINFO(%r1)
	ld	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1) /* get DAR */
	std	%r30,(PC_DBSAVE  +CPUSAVE_AIM_DAR)(%r1) /* save DAR */
	ld	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1) /* get DSISR */
	std	%r30,(PC_DBSAVE  +CPUSAVE_AIM_DSISR)(%r1) /* save DSISR */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R27)(%r1) /* get  r27 */
	std	%r31,(PC_DBSAVE  +CPUSAVE_R27)(%r1) /* save r27 */
	ld	%r30,(PC_DISISAVE+CPUSAVE_R28)(%r1) /* get  r28 */
	std	%r30,(PC_DBSAVE  +CPUSAVE_R28)(%r1) /* save r28 */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R29)(%r1) /* get  r29 */
	std	%r31,(PC_DBSAVE  +CPUSAVE_R29)(%r1) /* save r29 */
	ld	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1) /* get  r30 */
	std	%r30,(PC_DBSAVE  +CPUSAVE_R30)(%r1) /* save r30 */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1) /* get  r31 */
	std	%r31,(PC_DBSAVE  +CPUSAVE_R31)(%r1) /* save r31 */
	b	dbtrap
#endif

	/* XXX need stack probe here */
realtrap:
/* Test whether we already had PR set */
	mfsrr1	%r1
	mtcr	%r1
	mfsprg1	%r1			/* restore SP (might have been
					   overwritten) */
	bf	17,k_trap		/* branch if PSL_PR is false */
	GET_CPUINFO(%r1)
	ld	%r1,PC_CURPCB(%r1)
	mr	%r27,%r28		/* Save LR, r29 */
	mtsprg2	%r29
	bl	restore_kernsrs		/* enable kernel mapping */
	mfsprg2	%r29
	mr	%r28,%r27
	b	s_trap

/*
 * generictrap does some standard setup for trap handling to minimize
 * the code that needs to be installed in the actual vectors. It expects
 * the following conditions.
 *
 * R1 - Trap vector = LR & (0xff00 | R1)
 * SPRG1 - Original R1 contents
 * SPRG2 - Original LR
 */
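/*
 * SPRG3 is used as scratch along the way: it first holds the low mask
 * supplied by the vector stub in R1, is then overwritten with the
 * computed vector, and is finally picked up by FRAME_SETUP and stored
 * in the trapframe as FRAME_EXC.
 */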

generichypertrap:
	mtsprg3 %r1
	mfspr	%r1, SPR_HSRR0
	mtsrr0	%r1
	mfspr	%r1, SPR_HSRR1
	mtsrr1	%r1
	mfsprg3	%r1
	.globl	CNAME(generictrap)
generictrap:
	/* Save R1 for computing the exception vector */
	mtsprg3 %r1

	/* Save interesting registers */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	mfsprg1	%r1			/* restore SP, in case of branch */
	mfsprg2	%r28			/* save LR */
	mfcr	%r29			/* save CR */

	/* Compute the exception vector from the link register */
	mfsprg3 %r31
	ori	%r31,%r31,0xff00
	mflr	%r30
	addi	%r30,%r30,-4 /* The branch instruction, not the next */
	and	%r30,%r30,%r31
	mtsprg3	%r30

	/* Test whether we already had PR set */
	mfsrr1	%r31
	mtcr	%r31

s_trap:
	bf	17,k_trap		/* branch if PSL_PR is false */
	GET_CPUINFO(%r1)
u_trap:
	ld	%r1,PC_CURPCB(%r1)
	mr	%r27,%r28		/* Save LR, r29 */
	mtsprg2	%r29
	bl	restore_kernsrs		/* enable kernel mapping */
	mfsprg2	%r29
	mr	%r28,%r27

/*
 * Now the common trap catching code.
 */
k_trap:
	FRAME_SETUP(PC_TEMPSAVE)
/* Call C interrupt dispatcher: */
trapagain:
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48
	bl	CNAME(powerpc_interrupt)
	nop

	.globl	CNAME(trapexit)	/* backtrace code sentinel */
CNAME(trapexit):
/* Disable interrupts: */
	mfmsr	%r3
	andi.	%r3,%r3,~PSL_EE@l
	mtmsr	%r3
	isync
/* Test AST pending: */
	ld	%r5,FRAME_SRR1+48(%r1)
	mtcr	%r5
	bf	17,1f			/* branch if PSL_PR is false */

	GET_CPUINFO(%r3)		/* get per-CPU pointer */
	lwz	%r4, TD_FLAGS(%r13)	/* get thread flags value */
	lis	%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@h
	ori	%r5,%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@l
	and.	%r4,%r4,%r5
	beq	1f
	mfmsr	%r3			/* re-enable interrupts */
	ori	%r3,%r3,PSL_EE@l
	mtmsr	%r3
	isync
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48
	bl	CNAME(ast)
	nop
	.globl	CNAME(asttrapexit)	/* backtrace code sentinel #2 */
CNAME(asttrapexit):
	b	trapexit		/* test ast ret value ? */
1:
	FRAME_LEAVE(PC_TEMPSAVE)
	rfid
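
/*
 * Trap exit: interrupts are disabled while the saved SRR1 is checked.
 * If we are returning to user mode and TDF_ASTPENDING or TDF_NEEDRESCHED
 * is set on curthread, interrupts are re-enabled and ast() is called,
 * after which the check repeats via asttrapexit; otherwise the frame is
 * torn down and the rfid returns to the interrupted context.
 */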

#if defined(KDB)
/*
 * Deliberate entry to dbtrap
 */
ASENTRY_NOPROF(breakpoint)
	mtsprg1	%r1
	mfmsr	%r3
	mtsrr1	%r3
	andi.	%r3,%r3,~(PSL_EE|PSL_ME)@l
	mtmsr	%r3			/* disable interrupts */
	isync
	GET_CPUINFO(%r3)
	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r3)
	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r3)
	std	%r29,(PC_DBSAVE+CPUSAVE_R29)(%r3)
	std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r3)
	std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r3)
	mflr	%r28
	li	%r29,EXC_BPT
	mtlr	%r29
	mfcr	%r29
	mtsrr0	%r28

/*
 * Now the kdb trap catching code.
 */
dbtrap:
	/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
	mflr	%r1
	andi.	%r1,%r1,0xff00
	mtsprg3	%r1

	GET_TOCBASE(%r1)			/* get new SP */
	addis	%r1,%r1,TOC_REF(trapstk)@ha
	ld	%r1,TOC_REF(trapstk)@l(%r1)
	addi	%r1,%r1,(TRAPSTKSZ-48)

	FRAME_SETUP(PC_DBSAVE)
/* Call C trap code: */
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48
	bl	CNAME(db_trap_glue)
	nop
	or.	%r3,%r3,%r3
	bne	dbleave
/* This wasn't for KDB, so switch to real trap: */
	ld	%r3,FRAME_EXC+48(%r1)	/* save exception */
	GET_CPUINFO(%r4)
	std	%r3,(PC_DBSAVE+CPUSAVE_R31)(%r4)
	FRAME_LEAVE(PC_DBSAVE)
	mtsprg1	%r1			/* prepare for entrance to realtrap */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mflr	%r28
	mfcr	%r29
	ld	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)
	mtsprg3	%r31			/* SPRG3 was clobbered by FRAME_LEAVE */
	mfsprg1	%r1
	b	realtrap
dbleave:
	FRAME_LEAVE(PC_DBSAVE)
	rfid

/*
 * In case of KDB we want a separate trap catcher for it
 */
	.globl	CNAME(dblow),CNAME(dbend)
	.p2align 3
CNAME(dblow):
	mtsprg1	%r1			/* save SP */
	mtsprg2	%r29			/* save r29 */
	mfcr	%r29			/* save CR in r29 */
	mfsrr1	%r1
	mtcr	%r1
	bf	17,1f			/* branch if privileged */

	/* Unprivileged case */
	mtcr	%r29			/* put the condition register back */
        mfsprg2	%r29			/* ... and r29 */
        mflr	%r1			/* save LR */
	mtsprg2 %r1			/* And then in SPRG2 */

	ld	%r1, TRAP_GENTRAP(0)	/* Get branch address */
	mtlr	%r1
	li	%r1, 0	 		/* How to get the vector from LR */
	blrl				/* Branch to generictrap */

1:
	GET_CPUINFO(%r1)
	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r1)	/* free r27 */
	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r1)	/* free r28 */
        mfsprg2	%r28				/* r29 holds cr...  */
        std	%r28,(PC_DBSAVE+CPUSAVE_R29)(%r1)	/* free r29 */
        std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r1)	/* free r30 */
        std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)	/* free r31 */
        mflr	%r28					/* save LR */
	nop						/* alignment */
	bl	9f					/* Begin branch */
	.llong	dbtrap
9:	mflr	%r1
	ld	%r1,0(%r1)
	mtlr	%r1
	blrl				/* Branch to dbtrap */
CNAME(dbend):
#endif /* KDB */
