/* xref: /linux/arch/powerpc/kernel/exceptions-64s.S (revision 273b281fa22c293963ee3e6eec418f5dda2dbc83) */
/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/exception-64s.h>

/*
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */
26
/*
 * This is the start of the interrupt handlers for pSeries.
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
35	. = 0x100
36	.globl __start_interrupts
37__start_interrupts:
38
39	STD_EXCEPTION_PSERIES(0x100, system_reset)
40
41	. = 0x200
42_machine_check_pSeries:
43	HMT_MEDIUM
44	mtspr	SPRN_SPRG_SCRATCH0,r13		/* save r13 */
45	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
46
47	. = 0x300
48	.globl data_access_pSeries
49data_access_pSeries:
50	HMT_MEDIUM
51	mtspr	SPRN_SPRG_SCRATCH0,r13
52BEGIN_FTR_SECTION
53	mfspr	r13,SPRN_SPRG_PACA
54	std	r9,PACA_EXSLB+EX_R9(r13)
55	std	r10,PACA_EXSLB+EX_R10(r13)
56	mfspr	r10,SPRN_DAR
57	mfspr	r9,SPRN_DSISR
58	srdi	r10,r10,60
59	rlwimi	r10,r9,16,0x20
60	mfcr	r9
61	cmpwi	r10,0x2c
62	beq	do_stab_bolted_pSeries
63	ld	r10,PACA_EXSLB+EX_R10(r13)
64	std	r11,PACA_EXGEN+EX_R11(r13)
65	ld	r11,PACA_EXSLB+EX_R9(r13)
66	std	r12,PACA_EXGEN+EX_R12(r13)
67	mfspr	r12,SPRN_SPRG_SCRATCH0
68	std	r10,PACA_EXGEN+EX_R10(r13)
69	std	r11,PACA_EXGEN+EX_R9(r13)
70	std	r12,PACA_EXGEN+EX_R13(r13)
71	EXCEPTION_PROLOG_PSERIES_1(data_access_common)
72FTR_SECTION_ELSE
73	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
74ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB)
75
76	. = 0x380
77	.globl data_access_slb_pSeries
78data_access_slb_pSeries:
79	HMT_MEDIUM
80	mtspr	SPRN_SPRG_SCRATCH0,r13
81	mfspr	r13,SPRN_SPRG_PACA		/* get paca address into r13 */
82	std	r3,PACA_EXSLB+EX_R3(r13)
83	mfspr	r3,SPRN_DAR
84	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
85	mfcr	r9
86#ifdef __DISABLED__
87	/* Keep that around for when we re-implement dynamic VSIDs */
88	cmpdi	r3,0
89	bge	slb_miss_user_pseries
90#endif /* __DISABLED__ */
91	std	r10,PACA_EXSLB+EX_R10(r13)
92	std	r11,PACA_EXSLB+EX_R11(r13)
93	std	r12,PACA_EXSLB+EX_R12(r13)
94	mfspr	r10,SPRN_SPRG_SCRATCH0
95	std	r10,PACA_EXSLB+EX_R13(r13)
96	mfspr	r12,SPRN_SRR1		/* and SRR1 */
97#ifndef CONFIG_RELOCATABLE
98	b	.slb_miss_realmode
99#else
100	/*
101	 * We can't just use a direct branch to .slb_miss_realmode
102	 * because the distance from here to there depends on where
103	 * the kernel ends up being put.
104	 */
105	mfctr	r11
106	ld	r10,PACAKBASE(r13)
107	LOAD_HANDLER(r10, .slb_miss_realmode)
108	mtctr	r10
109	bctr
110#endif
111
112	STD_EXCEPTION_PSERIES(0x400, instruction_access)
113
114	. = 0x480
115	.globl instruction_access_slb_pSeries
116instruction_access_slb_pSeries:
117	HMT_MEDIUM
118	mtspr	SPRN_SPRG_SCRATCH0,r13
119	mfspr	r13,SPRN_SPRG_PACA		/* get paca address into r13 */
120	std	r3,PACA_EXSLB+EX_R3(r13)
121	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
122	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
123	mfcr	r9
124#ifdef __DISABLED__
125	/* Keep that around for when we re-implement dynamic VSIDs */
126	cmpdi	r3,0
127	bge	slb_miss_user_pseries
128#endif /* __DISABLED__ */
129	std	r10,PACA_EXSLB+EX_R10(r13)
130	std	r11,PACA_EXSLB+EX_R11(r13)
131	std	r12,PACA_EXSLB+EX_R12(r13)
132	mfspr	r10,SPRN_SPRG_SCRATCH0
133	std	r10,PACA_EXSLB+EX_R13(r13)
134	mfspr	r12,SPRN_SRR1		/* and SRR1 */
135#ifndef CONFIG_RELOCATABLE
136	b	.slb_miss_realmode
137#else
138	mfctr	r11
139	ld	r10,PACAKBASE(r13)
140	LOAD_HANDLER(r10, .slb_miss_realmode)
141	mtctr	r10
142	bctr
143#endif
144
145	MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
146	STD_EXCEPTION_PSERIES(0x600, alignment)
147	STD_EXCEPTION_PSERIES(0x700, program_check)
148	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
149	MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
150	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
151	STD_EXCEPTION_PSERIES(0xb00, trap_0b)
152
153	. = 0xc00
154	.globl	system_call_pSeries
155system_call_pSeries:
156	HMT_MEDIUM
157BEGIN_FTR_SECTION
158	cmpdi	r0,0x1ebe
159	beq-	1f
160END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
161	mr	r9,r13
162	mfspr	r13,SPRN_SPRG_PACA
163	mfspr	r11,SPRN_SRR0
164	ld	r12,PACAKBASE(r13)
165	ld	r10,PACAKMSR(r13)
166	LOAD_HANDLER(r12, system_call_entry)
167	mtspr	SPRN_SRR0,r12
168	mfspr	r12,SPRN_SRR1
169	mtspr	SPRN_SRR1,r10
170	rfid
171	b	.	/* prevent speculative execution */
172
173/* Fast LE/BE switch system call */
1741:	mfspr	r12,SPRN_SRR1
175	xori	r12,r12,MSR_LE
176	mtspr	SPRN_SRR1,r12
177	rfid		/* return to userspace */
178	b	.
179
180	STD_EXCEPTION_PSERIES(0xd00, single_step)
181	STD_EXCEPTION_PSERIES(0xe00, trap_0e)
182
183	/* We need to deal with the Altivec unavailable exception
184	 * here which is at 0xf20, thus in the middle of the
185	 * prolog code of the PerformanceMonitor one. A little
186	 * trickery is thus necessary
187	 */
188performance_monitor_pSeries_1:
189	. = 0xf00
190	b	performance_monitor_pSeries
191
192altivec_unavailable_pSeries_1:
193	. = 0xf20
194	b	altivec_unavailable_pSeries
195
196vsx_unavailable_pSeries_1:
197	. = 0xf40
198	b	vsx_unavailable_pSeries
199
200#ifdef CONFIG_CBE_RAS
201	HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
202#endif /* CONFIG_CBE_RAS */
203	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
204#ifdef CONFIG_CBE_RAS
205	HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
206#endif /* CONFIG_CBE_RAS */
207	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
208#ifdef CONFIG_CBE_RAS
209	HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
210#endif /* CONFIG_CBE_RAS */
211
212	. = 0x3000
213
214/*** pSeries interrupt support ***/
215
216	/* moved from 0xf00 */
217	STD_EXCEPTION_PSERIES(., performance_monitor)
218	STD_EXCEPTION_PSERIES(., altivec_unavailable)
219	STD_EXCEPTION_PSERIES(., vsx_unavailable)
220
221/*
222 * An interrupt came in while soft-disabled; clear EE in SRR1,
223 * clear paca->hard_enabled and return.
224 */
225masked_interrupt:
226	stb	r10,PACAHARDIRQEN(r13)
227	mtcrf	0x80,r9
228	ld	r9,PACA_EXGEN+EX_R9(r13)
229	mfspr	r10,SPRN_SRR1
230	rldicl	r10,r10,48,1		/* clear MSR_EE */
231	rotldi	r10,r10,16
232	mtspr	SPRN_SRR1,r10
233	ld	r10,PACA_EXGEN+EX_R10(r13)
234	mfspr	r13,SPRN_SPRG_SCRATCH0
235	rfid
236	b	.
237
238	.align	7
239do_stab_bolted_pSeries:
240	std	r11,PACA_EXSLB+EX_R11(r13)
241	std	r12,PACA_EXSLB+EX_R12(r13)
242	mfspr	r10,SPRN_SPRG_SCRATCH0
243	std	r10,PACA_EXSLB+EX_R13(r13)
244	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted)
245
246#ifdef CONFIG_PPC_PSERIES
247/*
248 * Vectors for the FWNMI option.  Share common code.
249 */
250	.globl system_reset_fwnmi
251      .align 7
252system_reset_fwnmi:
253	HMT_MEDIUM
254	mtspr	SPRN_SPRG_SCRATCH0,r13		/* save r13 */
255	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
256
257	.globl machine_check_fwnmi
258      .align 7
259machine_check_fwnmi:
260	HMT_MEDIUM
261	mtspr	SPRN_SPRG_SCRATCH0,r13		/* save r13 */
262	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
263
264#endif /* CONFIG_PPC_PSERIES */
265
266#ifdef __DISABLED__
267/*
268 * This is used for when the SLB miss handler has to go virtual,
269 * which doesn't happen for now anymore but will once we re-implement
270 * dynamic VSIDs for shared page tables
271 */
272slb_miss_user_pseries:
273	std	r10,PACA_EXGEN+EX_R10(r13)
274	std	r11,PACA_EXGEN+EX_R11(r13)
275	std	r12,PACA_EXGEN+EX_R12(r13)
276	mfspr	r10,SPRG_SCRATCH0
277	ld	r11,PACA_EXSLB+EX_R9(r13)
278	ld	r12,PACA_EXSLB+EX_R3(r13)
279	std	r10,PACA_EXGEN+EX_R13(r13)
280	std	r11,PACA_EXGEN+EX_R9(r13)
281	std	r12,PACA_EXGEN+EX_R3(r13)
282	clrrdi	r12,r13,32
283	mfmsr	r10
284	mfspr	r11,SRR0			/* save SRR0 */
285	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
286	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
287	mtspr	SRR0,r12
288	mfspr	r12,SRR1			/* and SRR1 */
289	mtspr	SRR1,r10
290	rfid
291	b	.				/* prevent spec. execution */
292#endif /* __DISABLED__ */
293
294	.align	7
295	.globl	__end_interrupts
296__end_interrupts:
297
/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above.  Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an addi instruction, these handlers must be in
 * the first 32k of the kernel image.
 */
305
306/*** Common interrupt handlers ***/
307
308	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
309
310	/*
311	 * Machine check is different because we use a different
312	 * save area: PACA_EXMC instead of PACA_EXGEN.
313	 */
314	.align	7
315	.globl machine_check_common
316machine_check_common:
317	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
318	FINISH_NAP
319	DISABLE_INTS
320	bl	.save_nvgprs
321	addi	r3,r1,STACK_FRAME_OVERHEAD
322	bl	.machine_check_exception
323	b	.ret_from_except
324
325	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
326	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
327	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
328	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
329	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
330	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
331	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
332#ifdef CONFIG_ALTIVEC
333	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
334#else
335	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
336#endif
337#ifdef CONFIG_CBE_RAS
338	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
339	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
340	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
341#endif /* CONFIG_CBE_RAS */
342
343	.align	7
344system_call_entry:
345	b	system_call_common
346
347/*
348 * Here we have detected that the kernel stack pointer is bad.
349 * R9 contains the saved CR, r13 points to the paca,
350 * r10 contains the (bad) kernel stack pointer,
351 * r11 and r12 contain the saved SRR0 and SRR1.
352 * We switch to using an emergency stack, save the registers there,
353 * and call kernel_bad_stack(), which panics.
354 */
355bad_stack:
356	ld	r1,PACAEMERGSP(r13)
357	subi	r1,r1,64+INT_FRAME_SIZE
358	std	r9,_CCR(r1)
359	std	r10,GPR1(r1)
360	std	r11,_NIP(r1)
361	std	r12,_MSR(r1)
362	mfspr	r11,SPRN_DAR
363	mfspr	r12,SPRN_DSISR
364	std	r11,_DAR(r1)
365	std	r12,_DSISR(r1)
366	mflr	r10
367	mfctr	r11
368	mfxer	r12
369	std	r10,_LINK(r1)
370	std	r11,_CTR(r1)
371	std	r12,_XER(r1)
372	SAVE_GPR(0,r1)
373	SAVE_GPR(2,r1)
374	SAVE_4GPRS(3,r1)
375	SAVE_2GPRS(7,r1)
376	SAVE_10GPRS(12,r1)
377	SAVE_10GPRS(22,r1)
378	lhz	r12,PACA_TRAP_SAVE(r13)
379	std	r12,_TRAP(r1)
380	addi	r11,r1,INT_FRAME_SIZE
381	std	r11,0(r1)
382	li	r12,0
383	std	r12,0(r11)
384	ld	r2,PACATOC(r13)
3851:	addi	r3,r1,STACK_FRAME_OVERHEAD
386	bl	.kernel_bad_stack
387	b	1b
388
389/*
390 * Here r13 points to the paca, r9 contains the saved CR,
391 * SRR0 and SRR1 are saved in r11 and r12,
392 * r9 - r13 are saved in paca->exgen.
393 */
394	.align	7
395	.globl data_access_common
396data_access_common:
397	mfspr	r10,SPRN_DAR
398	std	r10,PACA_EXGEN+EX_DAR(r13)
399	mfspr	r10,SPRN_DSISR
400	stw	r10,PACA_EXGEN+EX_DSISR(r13)
401	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
402	ld	r3,PACA_EXGEN+EX_DAR(r13)
403	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
404	li	r5,0x300
405	b	.do_hash_page	 	/* Try to handle as hpte fault */
406
407	.align	7
408	.globl instruction_access_common
409instruction_access_common:
410	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
411	ld	r3,_NIP(r1)
412	andis.	r4,r12,0x5820
413	li	r5,0x400
414	b	.do_hash_page		/* Try to handle as hpte fault */
415
416/*
417 * Here is the common SLB miss user that is used when going to virtual
418 * mode for SLB misses, that is currently not used
419 */
420#ifdef __DISABLED__
421	.align	7
422	.globl	slb_miss_user_common
423slb_miss_user_common:
424	mflr	r10
425	std	r3,PACA_EXGEN+EX_DAR(r13)
426	stw	r9,PACA_EXGEN+EX_CCR(r13)
427	std	r10,PACA_EXGEN+EX_LR(r13)
428	std	r11,PACA_EXGEN+EX_SRR0(r13)
429	bl	.slb_allocate_user
430
431	ld	r10,PACA_EXGEN+EX_LR(r13)
432	ld	r3,PACA_EXGEN+EX_R3(r13)
433	lwz	r9,PACA_EXGEN+EX_CCR(r13)
434	ld	r11,PACA_EXGEN+EX_SRR0(r13)
435	mtlr	r10
436	beq-	slb_miss_fault
437
438	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
439	beq-	unrecov_user_slb
440	mfmsr	r10
441
442.machine push
443.machine "power4"
444	mtcrf	0x80,r9
445.machine pop
446
447	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
448	mtmsrd	r10,1
449
450	mtspr	SRR0,r11
451	mtspr	SRR1,r12
452
453	ld	r9,PACA_EXGEN+EX_R9(r13)
454	ld	r10,PACA_EXGEN+EX_R10(r13)
455	ld	r11,PACA_EXGEN+EX_R11(r13)
456	ld	r12,PACA_EXGEN+EX_R12(r13)
457	ld	r13,PACA_EXGEN+EX_R13(r13)
458	rfid
459	b	.
460
461slb_miss_fault:
462	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
463	ld	r4,PACA_EXGEN+EX_DAR(r13)
464	li	r5,0
465	std	r4,_DAR(r1)
466	std	r5,_DSISR(r1)
467	b	handle_page_fault
468
469unrecov_user_slb:
470	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
471	DISABLE_INTS
472	bl	.save_nvgprs
4731:	addi	r3,r1,STACK_FRAME_OVERHEAD
474	bl	.unrecoverable_exception
475	b	1b
476
477#endif /* __DISABLED__ */
478
479
480/*
481 * r13 points to the PACA, r9 contains the saved CR,
482 * r12 contain the saved SRR1, SRR0 is still ready for return
483 * r3 has the faulting address
484 * r9 - r13 are saved in paca->exslb.
485 * r3 is saved in paca->slb_r3
486 * We assume we aren't going to take any exceptions during this procedure.
487 */
488_GLOBAL(slb_miss_realmode)
489	mflr	r10
490#ifdef CONFIG_RELOCATABLE
491	mtctr	r11
492#endif
493
494	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
495	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
496
497	bl	.slb_allocate_realmode
498
499	/* All done -- return from exception. */
500
501	ld	r10,PACA_EXSLB+EX_LR(r13)
502	ld	r3,PACA_EXSLB+EX_R3(r13)
503	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
504#ifdef CONFIG_PPC_ISERIES
505BEGIN_FW_FTR_SECTION
506	ld	r11,PACALPPACAPTR(r13)
507	ld	r11,LPPACASRR0(r11)		/* get SRR0 value */
508END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
509#endif /* CONFIG_PPC_ISERIES */
510
511	mtlr	r10
512
513	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
514	beq-	2f
515
516.machine	push
517.machine	"power4"
518	mtcrf	0x80,r9
519	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
520.machine	pop
521
522#ifdef CONFIG_PPC_ISERIES
523BEGIN_FW_FTR_SECTION
524	mtspr	SPRN_SRR0,r11
525	mtspr	SPRN_SRR1,r12
526END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
527#endif /* CONFIG_PPC_ISERIES */
528	ld	r9,PACA_EXSLB+EX_R9(r13)
529	ld	r10,PACA_EXSLB+EX_R10(r13)
530	ld	r11,PACA_EXSLB+EX_R11(r13)
531	ld	r12,PACA_EXSLB+EX_R12(r13)
532	ld	r13,PACA_EXSLB+EX_R13(r13)
533	rfid
534	b	.	/* prevent speculative execution */
535
5362:
537#ifdef CONFIG_PPC_ISERIES
538BEGIN_FW_FTR_SECTION
539	b	unrecov_slb
540END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
541#endif /* CONFIG_PPC_ISERIES */
542	mfspr	r11,SPRN_SRR0
543	ld	r10,PACAKBASE(r13)
544	LOAD_HANDLER(r10,unrecov_slb)
545	mtspr	SPRN_SRR0,r10
546	ld	r10,PACAKMSR(r13)
547	mtspr	SPRN_SRR1,r10
548	rfid
549	b	.
550
551unrecov_slb:
552	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
553	DISABLE_INTS
554	bl	.save_nvgprs
5551:	addi	r3,r1,STACK_FRAME_OVERHEAD
556	bl	.unrecoverable_exception
557	b	1b
558
559	.align	7
560	.globl hardware_interrupt_common
561	.globl hardware_interrupt_entry
562hardware_interrupt_common:
563	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
564	FINISH_NAP
565hardware_interrupt_entry:
566	DISABLE_INTS
567BEGIN_FTR_SECTION
568	bl	.ppc64_runlatch_on
569END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
570	addi	r3,r1,STACK_FRAME_OVERHEAD
571	bl	.do_IRQ
572	b	.ret_from_except_lite
573
574#ifdef CONFIG_PPC_970_NAP
575power4_fixup_nap:
576	andc	r9,r9,r10
577	std	r9,TI_LOCAL_FLAGS(r11)
578	ld	r10,_LINK(r1)		/* make idle task do the */
579	std	r10,_NIP(r1)		/* equivalent of a blr */
580	blr
581#endif
582
583	.align	7
584	.globl alignment_common
585alignment_common:
586	mfspr	r10,SPRN_DAR
587	std	r10,PACA_EXGEN+EX_DAR(r13)
588	mfspr	r10,SPRN_DSISR
589	stw	r10,PACA_EXGEN+EX_DSISR(r13)
590	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
591	ld	r3,PACA_EXGEN+EX_DAR(r13)
592	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
593	std	r3,_DAR(r1)
594	std	r4,_DSISR(r1)
595	bl	.save_nvgprs
596	addi	r3,r1,STACK_FRAME_OVERHEAD
597	ENABLE_INTS
598	bl	.alignment_exception
599	b	.ret_from_except
600
601	.align	7
602	.globl program_check_common
603program_check_common:
604	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
605	bl	.save_nvgprs
606	addi	r3,r1,STACK_FRAME_OVERHEAD
607	ENABLE_INTS
608	bl	.program_check_exception
609	b	.ret_from_except
610
611	.align	7
612	.globl fp_unavailable_common
613fp_unavailable_common:
614	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
615	bne	1f			/* if from user, just load it up */
616	bl	.save_nvgprs
617	addi	r3,r1,STACK_FRAME_OVERHEAD
618	ENABLE_INTS
619	bl	.kernel_fp_unavailable_exception
620	BUG_OPCODE
6211:	bl	.load_up_fpu
622	b	fast_exception_return
623
624	.align	7
625	.globl altivec_unavailable_common
626altivec_unavailable_common:
627	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
628#ifdef CONFIG_ALTIVEC
629BEGIN_FTR_SECTION
630	beq	1f
631	bl	.load_up_altivec
632	b	fast_exception_return
6331:
634END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
635#endif
636	bl	.save_nvgprs
637	addi	r3,r1,STACK_FRAME_OVERHEAD
638	ENABLE_INTS
639	bl	.altivec_unavailable_exception
640	b	.ret_from_except
641
642	.align	7
643	.globl vsx_unavailable_common
644vsx_unavailable_common:
645	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
646#ifdef CONFIG_VSX
647BEGIN_FTR_SECTION
648	bne	.load_up_vsx
6491:
650END_FTR_SECTION_IFSET(CPU_FTR_VSX)
651#endif
652	bl	.save_nvgprs
653	addi	r3,r1,STACK_FRAME_OVERHEAD
654	ENABLE_INTS
655	bl	.vsx_unavailable_exception
656	b	.ret_from_except
657
658	.align	7
659	.globl	__end_handlers
660__end_handlers:
661
662/*
663 * Return from an exception with minimal checks.
664 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
665 * If interrupts have been enabled, or anything has been
666 * done that might have changed the scheduling status of
667 * any task or sent any task a signal, you should use
668 * ret_from_except or ret_from_except_lite instead of this.
669 */
670fast_exc_return_irq:			/* restores irq state too */
671	ld	r3,SOFTE(r1)
672	TRACE_AND_RESTORE_IRQ(r3);
673	ld	r12,_MSR(r1)
674	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
675	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
676	b	1f
677
678	.globl	fast_exception_return
679fast_exception_return:
680	ld	r12,_MSR(r1)
6811:	ld	r11,_NIP(r1)
682	andi.	r3,r12,MSR_RI		/* check if RI is set */
683	beq-	unrecov_fer
684
685#ifdef CONFIG_VIRT_CPU_ACCOUNTING
686	andi.	r3,r12,MSR_PR
687	beq	2f
688	ACCOUNT_CPU_USER_EXIT(r3, r4)
6892:
690#endif
691
692	ld	r3,_CCR(r1)
693	ld	r4,_LINK(r1)
694	ld	r5,_CTR(r1)
695	ld	r6,_XER(r1)
696	mtcr	r3
697	mtlr	r4
698	mtctr	r5
699	mtxer	r6
700	REST_GPR(0, r1)
701	REST_8GPRS(2, r1)
702
703	mfmsr	r10
704	rldicl	r10,r10,48,1		/* clear EE */
705	rldicr	r10,r10,16,61		/* clear RI (LE is 0 already) */
706	mtmsrd	r10,1
707
708	mtspr	SPRN_SRR1,r12
709	mtspr	SPRN_SRR0,r11
710	REST_4GPRS(10, r1)
711	ld	r1,GPR1(r1)
712	rfid
713	b	.	/* prevent speculative execution */
714
715unrecov_fer:
716	bl	.save_nvgprs
7171:	addi	r3,r1,STACK_FRAME_OVERHEAD
718	bl	.unrecoverable_exception
719	b	1b
720
721
722/*
723 * Hash table stuff
724 */
725	.align	7
726_STATIC(do_hash_page)
727	std	r3,_DAR(r1)
728	std	r4,_DSISR(r1)
729
730	andis.	r0,r4,0xa450		/* weird error? */
731	bne-	handle_page_fault	/* if not, try to insert a HPTE */
732BEGIN_FTR_SECTION
733	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
734	bne-	do_ste_alloc		/* If so handle it */
735END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
736
737	clrrdi	r11,r1,THREAD_SHIFT
738	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
739	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
740	bne	77f			/* then don't call hash_page now */
741
742	/*
743	 * On iSeries, we soft-disable interrupts here, then
744	 * hard-enable interrupts so that the hash_page code can spin on
745	 * the hash_table_lock without problems on a shared processor.
746	 */
747	DISABLE_INTS
748
749	/*
750	 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
751	 * and will clobber volatile registers when irq tracing is enabled
752	 * so we need to reload them. It may be possible to be smarter here
753	 * and move the irq tracing elsewhere but let's keep it simple for
754	 * now
755	 */
756#ifdef CONFIG_TRACE_IRQFLAGS
757	ld	r3,_DAR(r1)
758	ld	r4,_DSISR(r1)
759	ld	r5,_TRAP(r1)
760	ld	r12,_MSR(r1)
761	clrrdi	r5,r5,4
762#endif /* CONFIG_TRACE_IRQFLAGS */
763	/*
764	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
765	 * accessing a userspace segment (even from the kernel). We assume
766	 * kernel addresses always have the high bit set.
767	 */
768	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
769	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
770	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
771	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
772	ori	r4,r4,1			/* add _PAGE_PRESENT */
773	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
774
775	/*
776	 * r3 contains the faulting address
777	 * r4 contains the required access permissions
778	 * r5 contains the trap number
779	 *
780	 * at return r3 = 0 for success
781	 */
782	bl	.hash_page		/* build HPTE if possible */
783	cmpdi	r3,0			/* see if hash_page succeeded */
784
785BEGIN_FW_FTR_SECTION
786	/*
787	 * If we had interrupts soft-enabled at the point where the
788	 * DSI/ISI occurred, and an interrupt came in during hash_page,
789	 * handle it now.
790	 * We jump to ret_from_except_lite rather than fast_exception_return
791	 * because ret_from_except_lite will check for and handle pending
792	 * interrupts if necessary.
793	 */
794	beq	13f
795END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
796
797BEGIN_FW_FTR_SECTION
798	/*
799	 * Here we have interrupts hard-disabled, so it is sufficient
800	 * to restore paca->{soft,hard}_enable and get out.
801	 */
802	beq	fast_exc_return_irq	/* Return from exception on success */
803END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
804
805	/* For a hash failure, we don't bother re-enabling interrupts */
806	ble-	12f
807
808	/*
809	 * hash_page couldn't handle it, set soft interrupt enable back
810	 * to what it was before the trap.  Note that .raw_local_irq_restore
811	 * handles any interrupts pending at this point.
812	 */
813	ld	r3,SOFTE(r1)
814	TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
815	bl	.raw_local_irq_restore
816	b	11f
817
818/* Here we have a page fault that hash_page can't handle. */
819handle_page_fault:
820	ENABLE_INTS
82111:	ld	r4,_DAR(r1)
822	ld	r5,_DSISR(r1)
823	addi	r3,r1,STACK_FRAME_OVERHEAD
824	bl	.do_page_fault
825	cmpdi	r3,0
826	beq+	13f
827	bl	.save_nvgprs
828	mr	r5,r3
829	addi	r3,r1,STACK_FRAME_OVERHEAD
830	lwz	r4,_DAR(r1)
831	bl	.bad_page_fault
832	b	.ret_from_except
833
83413:	b	.ret_from_except_lite
835
836/* We have a page fault that hash_page could handle but HV refused
837 * the PTE insertion
838 */
83912:	bl	.save_nvgprs
840	mr	r5,r3
841	addi	r3,r1,STACK_FRAME_OVERHEAD
842	ld	r4,_DAR(r1)
843	bl	.low_hash_fault
844	b	.ret_from_except
845
846/*
847 * We come here as a result of a DSI at a point where we don't want
848 * to call hash_page, such as when we are accessing memory (possibly
849 * user memory) inside a PMU interrupt that occurred while interrupts
850 * were soft-disabled.  We want to invoke the exception handler for
851 * the access, or panic if there isn't a handler.
852 */
85377:	bl	.save_nvgprs
854	mr	r4,r3
855	addi	r3,r1,STACK_FRAME_OVERHEAD
856	li	r5,SIGSEGV
857	bl	.bad_page_fault
858	b	.ret_from_except
859
860	/* here we have a segment miss */
861do_ste_alloc:
862	bl	.ste_allocate		/* try to insert stab entry */
863	cmpdi	r3,0
864	bne-	handle_page_fault
865	b	fast_exception_return
866
867/*
868 * r13 points to the PACA, r9 contains the saved CR,
869 * r11 and r12 contain the saved SRR0 and SRR1.
870 * r9 - r13 are saved in paca->exslb.
871 * We assume we aren't going to take any exceptions during this procedure.
872 * We assume (DAR >> 60) == 0xc.
873 */
874	.align	7
875_GLOBAL(do_stab_bolted)
876	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
877	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
878
879	/* Hash to the primary group */
880	ld	r10,PACASTABVIRT(r13)
881	mfspr	r11,SPRN_DAR
882	srdi	r11,r11,28
883	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
884
885	/* Calculate VSID */
886	/* This is a kernel address, so protovsid = ESID */
887	ASM_VSID_SCRAMBLE(r11, r9, 256M)
888	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
889
890	/* Search the primary group for a free entry */
8911:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
892	andi.	r11,r11,0x80
893	beq	2f
894	addi	r10,r10,16
895	andi.	r11,r10,0x70
896	bne	1b
897
898	/* Stick for only searching the primary group for now.		*/
899	/* At least for now, we use a very simple random castout scheme */
900	/* Use the TB as a random number ;  OR in 1 to avoid entry 0	*/
901	mftb	r11
902	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
903	ori	r11,r11,0x10
904
905	/* r10 currently points to an ste one past the group of interest */
906	/* make it point to the randomly selected entry			*/
907	subi	r10,r10,128
908	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/
909
910	isync			/* mark the entry invalid		*/
911	ld	r11,0(r10)
912	rldicl	r11,r11,56,1	/* clear the valid bit */
913	rotldi	r11,r11,8
914	std	r11,0(r10)
915	sync
916
917	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
918	slbie	r11
919
9202:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
921	eieio
922
923	mfspr	r11,SPRN_DAR		/* Get the new esid			*/
924	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
925	ori	r11,r11,0x90	/* Turn on valid and kp			*/
926	std	r11,0(r10)	/* Put new entry back into the stab	*/
927
928	sync
929
930	/* All done -- return from exception. */
931	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
932	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
933
934	andi.	r10,r12,MSR_RI
935	beq-	unrecov_slb
936
937	mtcrf	0x80,r9			/* restore CR */
938
939	mfmsr	r10
940	clrrdi	r10,r10,2
941	mtmsrd	r10,1
942
943	mtspr	SPRN_SRR0,r11
944	mtspr	SPRN_SRR1,r12
945	ld	r9,PACA_EXSLB+EX_R9(r13)
946	ld	r10,PACA_EXSLB+EX_R10(r13)
947	ld	r11,PACA_EXSLB+EX_R11(r13)
948	ld	r12,PACA_EXSLB+EX_R12(r13)
949	ld	r13,PACA_EXSLB+EX_R13(r13)
950	rfid
951	b	.	/* prevent speculative execution */
952
953/*
954 * Space for CPU0's segment table.
955 *
956 * On iSeries, the hypervisor must fill in at least one entry before
957 * we get control (with relocate on).  The address is given to the hv
958 * as a page number (see xLparMap below), so this must be at a
959 * fixed address (the linker can't compute (u64)&initial_stab >>
960 * PAGE_SHIFT).
961 */
962	. = STAB0_OFFSET	/* 0x6000 */
963	.globl initial_stab
964initial_stab:
965	.space	4096
966
967#ifdef CONFIG_PPC_PSERIES
968/*
969 * Data area reserved for FWNMI option.
970 * This address (0x7000) is fixed by the RPA.
971 */
972	.= 0x7000
973	.globl fwnmi_data_area
974fwnmi_data_area:
975#endif /* CONFIG_PPC_PSERIES */
976
977	/* iSeries does not use the FWNMI stuff, so it is safe to put
978	 * this here, even if we later allow kernels that will boot on
979	 * both pSeries and iSeries */
980#ifdef CONFIG_PPC_ISERIES
981        . = LPARMAP_PHYS
982	.globl xLparMap
983xLparMap:
984	.quad	HvEsidsToMap		/* xNumberEsids */
985	.quad	HvRangesToMap		/* xNumberRanges */
986	.quad	STAB0_PAGE		/* xSegmentTableOffs */
987	.zero	40			/* xRsvd */
988	/* xEsids (HvEsidsToMap entries of 2 quads) */
989	.quad	PAGE_OFFSET_ESID	/* xKernelEsid */
990	.quad	PAGE_OFFSET_VSID	/* xKernelVsid */
991	.quad	VMALLOC_START_ESID	/* xKernelEsid */
992	.quad	VMALLOC_START_VSID	/* xKernelVsid */
993	/* xRanges (HvRangesToMap entries of 3 quads) */
994	.quad	HvPagesToMap		/* xPages */
995	.quad	0			/* xOffset */
996	.quad	PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT)	/* xVPN */
997
998#endif /* CONFIG_PPC_ISERIES */
999
1000#ifdef CONFIG_PPC_PSERIES
1001        . = 0x8000
1002#endif /* CONFIG_PPC_PSERIES */
1003