xref: /linux/arch/powerpc/kernel/exceptions-64s.S (revision 5bdef865eb358b6f3760e25e591ae115e9eeddef)
1/*
2 * This file contains the 64-bit "server" PowerPC variant
3 * of the low level exception handling including exception
4 * vectors, exception return, part of the slb and stab
5 * handling and other fixed offset specific things.
6 *
7 * This file is meant to be #included from head_64.S due to
8 * position dependent assembly.
9 *
10 * Most of this originates from head_64.S and thus has the same
11 * copyright history.
12 *
13 */
14
15/*
16 * We layout physical memory as follows:
17 * 0x0000 - 0x00ff : Secondary processor spin code
18 * 0x0100 - 0x2fff : pSeries Interrupt prologs
19 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
20 * 0x6000 - 0x6fff : Initial (CPU0) segment table
21 * 0x7000 - 0x7fff : FWNMI data area
22 * 0x8000 -        : Early init and support code
23 */
24
25
26/*
27 *   SPRG Usage
28 *
29 *   Register	Definition
30 *
31 *   SPRG0	reserved for hypervisor
32 *   SPRG1	temp - used to save gpr
33 *   SPRG2	temp - used to save gpr
34 *   SPRG3	virt addr of paca
35 */
36
37/*
38 * This is the start of the interrupt handlers for pSeries
39 * This code runs with relocation off.
40 * Code from here to __end_interrupts gets copied down to real
41 * address 0x100 when we are running a relocatable kernel.
42 * Therefore any relative branches in this section must only
43 * branch to labels in this section.
44 */
45	. = 0x100
46	.globl __start_interrupts
47__start_interrupts:
48
	/* 0x100: system reset */
49	STD_EXCEPTION_PSERIES(0x100, system_reset)
50
	/* 0x200: machine check.  Uses its own save area (PACA_EXMC) so a
	 * machine check taken inside another prolog does not corrupt the
	 * general save area. */
51	. = 0x200
52_machine_check_pSeries:
53	HMT_MEDIUM
54	mtspr	SPRN_SPRG1,r13		/* save r13 */
55	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
56
	/* 0x300: data storage interrupt (DSI).  On CPUs without an SLB the
	 * feature section below diverts bolted segment-table misses to the
	 * fast do_stab_bolted path before the normal prolog runs. */
57	. = 0x300
58	.globl data_access_pSeries
59data_access_pSeries:
60	HMT_MEDIUM
61	mtspr	SPRN_SPRG1,r13
62BEGIN_FTR_SECTION
63	mtspr	SPRN_SPRG2,r12		/* need a second scratch reg for the test */
64	mfspr	r13,SPRN_DAR
65	mfspr	r12,SPRN_DSISR
66	srdi	r13,r13,60		/* r13 = top nibble of DAR (region) */
67	rlwimi	r13,r12,16,0x20		/* fold in a DSISR status bit; 0xc|0x20 below */
68	mfcr	r12
69	cmpwi	r13,0x2c		/* kernel region + stab-miss bit — NOTE(review): confirm bit meaning */
70	beq	do_stab_bolted_pSeries
71	mtcrf	0x80,r12		/* restore cr0 clobbered by the compare */
72	mfspr	r12,SPRN_SPRG2
73END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
74	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
75
	/* 0x380: data SLB miss.  Save r3, r9-r13, CR and SRR1 into the
	 * PACA_EXSLB save area, then call slb_miss_realmode with
	 * relocation still off. */
76	. = 0x380
77	.globl data_access_slb_pSeries
78data_access_slb_pSeries:
79	HMT_MEDIUM
80	mtspr	SPRN_SPRG1,r13		/* save r13 in SPRG1 */
81	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
82	std	r3,PACA_EXSLB+EX_R3(r13)
83	mfspr	r3,SPRN_DAR		/* r3 = faulting data address */
84	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
85	mfcr	r9
86#ifdef __DISABLED__
87	/* Keep that around for when we re-implement dynamic VSIDs */
88	cmpdi	r3,0
89	bge	slb_miss_user_pseries
90#endif /* __DISABLED__ */
91	std	r10,PACA_EXSLB+EX_R10(r13)
92	std	r11,PACA_EXSLB+EX_R11(r13)
93	std	r12,PACA_EXSLB+EX_R12(r13)
94	mfspr	r10,SPRN_SPRG1
95	std	r10,PACA_EXSLB+EX_R13(r13)	/* stash the original r13 too */
96	mfspr	r12,SPRN_SRR1		/* and SRR1 */
97#ifndef CONFIG_RELOCATABLE
98	b	.slb_miss_realmode
99#else
100	/*
101	 * We can't just use a direct branch to .slb_miss_realmode
102	 * because the distance from here to there depends on where
103	 * the kernel ends up being put.
104	 */
105	mfctr	r11			/* save CTR; slb_miss_realmode restores it */
106	ld	r10,PACAKBASE(r13)
107	LOAD_HANDLER(r10, .slb_miss_realmode)
108	mtctr	r10
109	bctr
110#endif
111
	/* 0x400: instruction storage interrupt (ISI) */
112	STD_EXCEPTION_PSERIES(0x400, instruction_access)
113
	/* 0x480: instruction SLB miss.  Mirrors data_access_slb_pSeries
	 * except the faulting address comes from SRR0, not DAR. */
114	. = 0x480
115	.globl instruction_access_slb_pSeries
116instruction_access_slb_pSeries:
117	HMT_MEDIUM
118	mtspr	SPRN_SPRG1,r13		/* save r13 in SPRG1 */
119	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
120	std	r3,PACA_EXSLB+EX_R3(r13)
121	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
122	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
123	mfcr	r9
124#ifdef __DISABLED__
125	/* Keep that around for when we re-implement dynamic VSIDs */
126	cmpdi	r3,0
127	bge	slb_miss_user_pseries
128#endif /* __DISABLED__ */
129	std	r10,PACA_EXSLB+EX_R10(r13)
130	std	r11,PACA_EXSLB+EX_R11(r13)
131	std	r12,PACA_EXSLB+EX_R12(r13)
132	mfspr	r10,SPRN_SPRG1
133	std	r10,PACA_EXSLB+EX_R13(r13)	/* stash the original r13 too */
134	mfspr	r12,SPRN_SRR1		/* and SRR1 */
135#ifndef CONFIG_RELOCATABLE
136	b	.slb_miss_realmode
137#else
	/* Relocatable kernel: reach the handler via CTR (see 0x380 above) */
138	mfctr	r11
139	ld	r10,PACAKBASE(r13)
140	LOAD_HANDLER(r10, .slb_miss_realmode)
141	mtctr	r10
142	bctr
143#endif
144
	/* Remaining fixed vectors up to 0xb00; each macro emits the prolog
	 * at its architected offset.  MASKABLE variants honour the paca
	 * soft-disable state (see masked_interrupt below). */
145	MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
146	STD_EXCEPTION_PSERIES(0x600, alignment)
147	STD_EXCEPTION_PSERIES(0x700, program_check)
148	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
149	MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
150	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
151	STD_EXCEPTION_PSERIES(0xb00, trap_0b)
152
	/* 0xc00: system call.  Builds SRR0/SRR1 by hand so we can rfid to
	 * system_call_entry with the full kernel MSR (PACAKMSR). */
153	. = 0xc00
154	.globl	system_call_pSeries
155system_call_pSeries:
156	HMT_MEDIUM
157BEGIN_FTR_SECTION
158	cmpdi	r0,0x1ebe		/* magic r0: fast endian-switch service */
159	beq-	1f
160END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
161	mr	r9,r13			/* free r13 for the paca pointer */
162	mfspr	r13,SPRN_SPRG3
163	mfspr	r11,SPRN_SRR0		/* r11 = userspace return address */
164	ld	r12,PACAKBASE(r13)
165	ld	r10,PACAKMSR(r13)
166	LOAD_HANDLER(r12, system_call_entry)
167	mtspr	SPRN_SRR0,r12
168	mfspr	r12,SPRN_SRR1		/* r12 = caller MSR, for the handler */
169	mtspr	SPRN_SRR1,r10
170	rfid
171	b	.	/* prevent speculative execution */
172
173/* Fast LE/BE switch system call */
1741:	mfspr	r12,SPRN_SRR1
175	xori	r12,r12,MSR_LE		/* flip endian bit only */
176	mtspr	SPRN_SRR1,r12
177	rfid		/* return to userspace */
178	b	.
179
180	STD_EXCEPTION_PSERIES(0xd00, single_step)
181	STD_EXCEPTION_PSERIES(0xe00, trap_0e)
182
183	/* We need to deal with the Altivec unavailable exception
184	 * here which is at 0xf20, thus in the middle of the
185	 * prolog code of the PerformanceMonitor one. A little
186	 * trickery is thus necessary: each 0xfXX vector is just a
187	 * branch to its real prolog, emitted after 0x3000 below.
187	 */
188	. = 0xf00
189	b	performance_monitor_pSeries
190
191	. = 0xf20
192	b	altivec_unavailable_pSeries
193
194	. = 0xf40
195	b	vsx_unavailable_pSeries
196
	/* Cell (CBE) RAS vectors use the hypervisor-style prolog */
197#ifdef CONFIG_CBE_RAS
198	HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
199#endif /* CONFIG_CBE_RAS */
200	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
201#ifdef CONFIG_CBE_RAS
202	HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
203#endif /* CONFIG_CBE_RAS */
204	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
205#ifdef CONFIG_CBE_RAS
206	HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
207#endif /* CONFIG_CBE_RAS */
208
209	. = 0x3000
210
211/*** pSeries interrupt support ***/
212
213	/* moved from 0xf00 */
214	STD_EXCEPTION_PSERIES(., performance_monitor)
215	STD_EXCEPTION_PSERIES(., altivec_unavailable)
216	STD_EXCEPTION_PSERIES(., vsx_unavailable)
217
218/*
219 * An interrupt came in while soft-disabled; clear EE in SRR1,
220 * clear paca->hard_enabled and return.
221 */
222masked_interrupt:
223	stb	r10,PACAHARDIRQEN(r13)	/* NOTE(review): assumes r10 == 0 here, set by the maskable prolog — confirm */
224	mtcrf	0x80,r9			/* restore cr0 from the saved CR */
225	ld	r9,PACA_EXGEN+EX_R9(r13)
226	mfspr	r10,SPRN_SRR1
227	rldicl	r10,r10,48,1		/* clear MSR_EE */
228	rotldi	r10,r10,16		/* rotate back into place */
229	mtspr	SPRN_SRR1,r10
230	ld	r10,PACA_EXGEN+EX_R10(r13)
231	mfspr	r13,SPRN_SPRG1		/* restore original r13 */
232	rfid
233	b	.
234
	/* Continuation of the 0x300 STAB fast path: restore cr0 and r12
	 * saved there, then take the standard prolog into .do_stab_bolted. */
235	.align	7
236do_stab_bolted_pSeries:
237	mtcrf	0x80,r12
238	mfspr	r12,SPRN_SPRG2
239	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
240
241#ifdef CONFIG_PPC_PSERIES
242/*
243 * Vectors for the FWNMI option.  Share common code.
244 * Firmware delivers system-reset / machine-check NMIs here when the
245 * FWNMI facility has been registered (presumably per the RPA — verify).
244 */
245	.globl system_reset_fwnmi
246      .align 7
247system_reset_fwnmi:
248	HMT_MEDIUM
249	mtspr	SPRN_SPRG1,r13		/* save r13 */
250	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
251
252	.globl machine_check_fwnmi
253      .align 7
254machine_check_fwnmi:
255	HMT_MEDIUM
256	mtspr	SPRN_SPRG1,r13		/* save r13 */
257	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
258
259#endif /* CONFIG_PPC_PSERIES */
260
261#ifdef __DISABLED__
262/*
263 * This is used for when the SLB miss handler has to go virtual,
264 * which doesn't happen for now anymore but will once we re-implement
265 * dynamic VSIDs for shared page tables
266 *
267 * NOTE(review): this dead code uses unprefixed SPR names (SPRG1, SRR0)
268 * unlike the live code's SPRN_* — harmless while compiled out.
266 */
267slb_miss_user_pseries:
268	std	r10,PACA_EXGEN+EX_R10(r13)
269	std	r11,PACA_EXGEN+EX_R11(r13)
270	std	r12,PACA_EXGEN+EX_R12(r13)
271	mfspr	r10,SPRG1
	/* migrate the state saved in EXSLB over to EXGEN for the virtual path */
272	ld	r11,PACA_EXSLB+EX_R9(r13)
273	ld	r12,PACA_EXSLB+EX_R3(r13)
274	std	r10,PACA_EXGEN+EX_R13(r13)
275	std	r11,PACA_EXGEN+EX_R9(r13)
276	std	r12,PACA_EXGEN+EX_R3(r13)
277	clrrdi	r12,r13,32
278	mfmsr	r10
279	mfspr	r11,SRR0			/* save SRR0 */
280	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
281	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
282	mtspr	SRR0,r12
283	mfspr	r12,SRR1			/* and SRR1 */
284	mtspr	SRR1,r10
285	rfid
286	b	.				/* prevent spec. execution */
287#endif /* __DISABLED__ */
288
	/* End of the region copied down to real address 0x100 when the
	 * kernel is relocatable (see the comment above __start_interrupts). */
289	.align	7
290	.globl	__end_interrupts
291__end_interrupts:
292
293/*
294 * Code from here down to __end_handlers is invoked from the
295 * exception prologs above.  Because the prologs assemble the
296 * addresses of these handlers using the LOAD_HANDLER macro,
297 * which uses an addi instruction, these handlers must be in
298 * the first 32k of the kernel image.
299 */
300
301/*** Common interrupt handlers ***/
302
303	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
304
305	/*
306	 * Machine check is different because we use a different
307	 * save area: PACA_EXMC instead of PACA_EXGEN.
308	 */
309	.align	7
310	.globl machine_check_common
311machine_check_common:
312	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
313	FINISH_NAP
314	DISABLE_INTS
315	bl	.save_nvgprs
316	addi	r3,r1,STACK_FRAME_OVERHEAD	/* r3 = pt_regs */
317	bl	.machine_check_exception
318	b	.ret_from_except
319
	/* Common handlers for the remaining standard vectors */
320	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
321	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
322	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
323	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
324	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
325	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
326	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
327#ifdef CONFIG_ALTIVEC
328	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
329#else
330	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
331#endif
332#ifdef CONFIG_CBE_RAS
333	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
334	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
335	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
336#endif /* CONFIG_CBE_RAS */
337
	/* Trampoline reached from the 0xc00 prolog (must sit in the first
	 * 32k so LOAD_HANDLER's addi can reach it). */
338	.align	7
339system_call_entry:
340	b	system_call_common
341
342/*
343 * Here we have detected that the kernel stack pointer is bad.
344 * R9 contains the saved CR, r13 points to the paca,
345 * r10 contains the (bad) kernel stack pointer,
346 * r11 and r12 contain the saved SRR0 and SRR1.
347 * We switch to using an emergency stack, save the registers there,
348 * and call kernel_bad_stack(), which panics.
349 */
350bad_stack:
351	ld	r1,PACAEMERGSP(r13)
352	subi	r1,r1,64+INT_FRAME_SIZE	/* 64-byte scratch + full int frame */
353	std	r9,_CCR(r1)
354	std	r10,GPR1(r1)		/* record the bad SP for the panic dump */
355	std	r11,_NIP(r1)
356	std	r12,_MSR(r1)
357	mfspr	r11,SPRN_DAR
358	mfspr	r12,SPRN_DSISR
359	std	r11,_DAR(r1)
360	std	r12,_DSISR(r1)
361	mflr	r10
362	mfctr	r11
363	mfxer	r12
364	std	r10,_LINK(r1)
365	std	r11,_CTR(r1)
366	std	r12,_XER(r1)
	/* save every GPR so the panic output is complete */
367	SAVE_GPR(0,r1)
368	SAVE_GPR(2,r1)
369	SAVE_4GPRS(3,r1)
370	SAVE_2GPRS(7,r1)
371	SAVE_10GPRS(12,r1)
372	SAVE_10GPRS(22,r1)
373	lhz	r12,PACA_TRAP_SAVE(r13)	/* trap number stashed by the prolog */
374	std	r12,_TRAP(r1)
375	addi	r11,r1,INT_FRAME_SIZE
376	std	r11,0(r1)		/* back-chain pointer ... */
377	li	r12,0
378	std	r12,0(r11)		/* ... terminated with NULL */
379	ld	r2,PACATOC(r13)		/* need a valid TOC before calling C */
3801:	addi	r3,r1,STACK_FRAME_OVERHEAD
381	bl	.kernel_bad_stack
382	b	1b			/* kernel_bad_stack panics; loop if it returns */
383
384/*
385 * Here r13 points to the paca, r9 contains the saved CR,
386 * SRR0 and SRR1 are saved in r11 and r12,
387 * r9 - r13 are saved in paca->exgen.
388 */
389	.align	7
390	.globl data_access_common
391data_access_common:
	/* capture DAR/DSISR before the common prolog can disturb them */
392	mfspr	r10,SPRN_DAR
393	std	r10,PACA_EXGEN+EX_DAR(r13)
394	mfspr	r10,SPRN_DSISR
395	stw	r10,PACA_EXGEN+EX_DSISR(r13)
396	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
397	ld	r3,PACA_EXGEN+EX_DAR(r13)	/* r3 = faulting address */
398	lwz	r4,PACA_EXGEN+EX_DSISR(r13)	/* r4 = access flags */
399	li	r5,0x300			/* r5 = trap number */
400	b	.do_hash_page	 	/* Try to handle as hpte fault */
401
402	.align	7
403	.globl instruction_access_common
404instruction_access_common:
405	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
406	ld	r3,_NIP(r1)		/* for an ISI the faulting address is the NIP */
407	andis.	r4,r12,0x5820		/* extract ISI status bits from SRR1 — NOTE(review): verify mask */
408	li	r5,0x400		/* r5 = trap number */
409	b	.do_hash_page		/* Try to handle as hpte fault */
410
411/*
412 * Here is the common SLB miss user that is used when going to virtual
413 * mode for SLB misses, that is currently not used
414 */
415#ifdef __DISABLED__
416	.align	7
417	.globl	slb_miss_user_common
418slb_miss_user_common:
419	mflr	r10
420	std	r3,PACA_EXGEN+EX_DAR(r13)	/* stash faulting addr for the fault path */
421	stw	r9,PACA_EXGEN+EX_CCR(r13)
422	std	r10,PACA_EXGEN+EX_LR(r13)
423	std	r11,PACA_EXGEN+EX_SRR0(r13)
424	bl	.slb_allocate_user
425
426	ld	r10,PACA_EXGEN+EX_LR(r13)
427	ld	r3,PACA_EXGEN+EX_R3(r13)
428	lwz	r9,PACA_EXGEN+EX_CCR(r13)
429	ld	r11,PACA_EXGEN+EX_SRR0(r13)
430	mtlr	r10
431	beq-	slb_miss_fault		/* NOTE(review): cr0 presumably set by slb_allocate_user — confirm */
432
433	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
434	beq-	unrecov_user_slb
435	mfmsr	r10
436
437.machine push
438.machine "power4"
439	mtcrf	0x80,r9
440.machine pop
441
442	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
443	mtmsrd	r10,1
444
445	mtspr	SRR0,r11
446	mtspr	SRR1,r12
447
448	ld	r9,PACA_EXGEN+EX_R9(r13)
449	ld	r10,PACA_EXGEN+EX_R10(r13)
450	ld	r11,PACA_EXGEN+EX_R11(r13)
451	ld	r12,PACA_EXGEN+EX_R12(r13)
452	ld	r13,PACA_EXGEN+EX_R13(r13)	/* r13 last: frees the paca pointer */
453	rfid
454	b	.
455
456slb_miss_fault:
457	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
458	ld	r4,PACA_EXGEN+EX_DAR(r13)
459	li	r5,0
460	std	r4,_DAR(r1)
461	std	r5,_DSISR(r1)
462	b	handle_page_fault
463
464unrecov_user_slb:
465	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
466	DISABLE_INTS
467	bl	.save_nvgprs
4681:	addi	r3,r1,STACK_FRAME_OVERHEAD
469	bl	.unrecoverable_exception
470	b	1b
471
472#endif /* __DISABLED__ */
473
474
475/*
476 * r13 points to the PACA, r9 contains the saved CR,
477 * r12 contain the saved SRR1, SRR0 is still ready for return
478 * r3 has the faulting address
479 * r9 - r13 are saved in paca->exslb.
480 * r3 is saved in paca->slb_r3
481 * We assume we aren't going to take any exceptions during this procedure.
482 */
483_GLOBAL(slb_miss_realmode)
484	mflr	r10
485#ifdef CONFIG_RELOCATABLE
486	mtctr	r11			/* restore CTR saved by the 0x380/0x480 prolog */
487#endif
488
489	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
490	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
491
492	bl	.slb_allocate_realmode
493
494	/* All done -- return from exception. */
495
496	ld	r10,PACA_EXSLB+EX_LR(r13)
497	ld	r3,PACA_EXSLB+EX_R3(r13)
498	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
499#ifdef CONFIG_PPC_ISERIES
500BEGIN_FW_FTR_SECTION
	/* iSeries: the hypervisor saved SRR0 in the lppaca, not the SPR */
501	ld	r11,PACALPPACAPTR(r13)
502	ld	r11,LPPACASRR0(r11)		/* get SRR0 value */
503END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
504#endif /* CONFIG_PPC_ISERIES */
505
506	mtlr	r10
507
508	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
509	beq-	2f
510
511.machine	push
512.machine	"power4"
513	mtcrf	0x80,r9
514	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
515.machine	pop
516
517#ifdef CONFIG_PPC_ISERIES
518BEGIN_FW_FTR_SECTION
519	mtspr	SPRN_SRR0,r11
520	mtspr	SPRN_SRR1,r12
521END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
522#endif /* CONFIG_PPC_ISERIES */
523	ld	r9,PACA_EXSLB+EX_R9(r13)
524	ld	r10,PACA_EXSLB+EX_R10(r13)
525	ld	r11,PACA_EXSLB+EX_R11(r13)
526	ld	r12,PACA_EXSLB+EX_R12(r13)
527	ld	r13,PACA_EXSLB+EX_R13(r13)	/* r13 restored last */
528	rfid
529	b	.	/* prevent speculative execution */
530
	/* RI was clear: unrecoverable.  Redirect SRR0/SRR1 to unrecov_slb
	 * (via LOAD_HANDLER since we may be relocated) and rfid there. */
5312:
532#ifdef CONFIG_PPC_ISERIES
533BEGIN_FW_FTR_SECTION
534	b	unrecov_slb
535END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
536#endif /* CONFIG_PPC_ISERIES */
537	mfspr	r11,SPRN_SRR0
538	ld	r10,PACAKBASE(r13)
539	LOAD_HANDLER(r10,unrecov_slb)
540	mtspr	SPRN_SRR0,r10
541	ld	r10,PACAKMSR(r13)
542	mtspr	SPRN_SRR1,r10
543	rfid
544	b	.
545
546unrecov_slb:
547	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
548	DISABLE_INTS
549	bl	.save_nvgprs
5501:	addi	r3,r1,STACK_FRAME_OVERHEAD
551	bl	.unrecoverable_exception
552	b	1b
553
554	.align	7
555	.globl hardware_interrupt_common
556	.globl hardware_interrupt_entry
557hardware_interrupt_common:
558	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
559	FINISH_NAP
560hardware_interrupt_entry:
561	DISABLE_INTS
562BEGIN_FTR_SECTION
563	bl	.ppc64_runlatch_on	/* CPU_FTR_CTRL: flag the thread as running */
564END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
565	addi	r3,r1,STACK_FRAME_OVERHEAD
566	bl	.do_IRQ
567	b	.ret_from_except_lite
568
569#ifdef CONFIG_PPC_970_NAP
	/* Interrupt taken while a 970 was napping: clear the nap flag bits
	 * in thread_info local flags and rewrite the saved NIP with the
	 * saved LR, so the idle loop resumes as if it executed blr. */
570power4_fixup_nap:
571	andc	r9,r9,r10
572	std	r9,TI_LOCAL_FLAGS(r11)
573	ld	r10,_LINK(r1)		/* make idle task do the */
574	std	r10,_NIP(r1)		/* equivalent of a blr */
575	blr
576#endif
577
578	.align	7
579	.globl alignment_common
580alignment_common:
	/* capture DAR/DSISR before the common prolog can disturb them */
581	mfspr	r10,SPRN_DAR
582	std	r10,PACA_EXGEN+EX_DAR(r13)
583	mfspr	r10,SPRN_DSISR
584	stw	r10,PACA_EXGEN+EX_DSISR(r13)
585	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
586	ld	r3,PACA_EXGEN+EX_DAR(r13)
587	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
588	std	r3,_DAR(r1)
589	std	r4,_DSISR(r1)
590	bl	.save_nvgprs
591	addi	r3,r1,STACK_FRAME_OVERHEAD
592	ENABLE_INTS
593	bl	.alignment_exception
594	b	.ret_from_except
595
596	.align	7
597	.globl program_check_common
598program_check_common:
599	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
600	bl	.save_nvgprs		/* full register set for signal/debug paths */
601	addi	r3,r1,STACK_FRAME_OVERHEAD
602	ENABLE_INTS
603	bl	.program_check_exception
604	b	.ret_from_except
605
606	.align	7
607	.globl fp_unavailable_common
608fp_unavailable_common:
609	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
610	bne	1f			/* if from user, just load it up */
	/* FP unavailable in the kernel is a bug: report and trap */
611	bl	.save_nvgprs
612	addi	r3,r1,STACK_FRAME_OVERHEAD
613	ENABLE_INTS
614	bl	.kernel_fp_unavailable_exception
615	BUG_OPCODE
6161:	bl	.load_up_fpu
617	b	fast_exception_return
618
619	.align	7
620	.globl altivec_unavailable_common
621altivec_unavailable_common:
622	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
623#ifdef CONFIG_ALTIVEC
624BEGIN_FTR_SECTION
625	beq	1f			/* from kernel: raise the exception instead */
626	bl	.load_up_altivec	/* from user: lazily load VMX state */
627	b	fast_exception_return
6281:
629END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
630#endif
631	bl	.save_nvgprs
632	addi	r3,r1,STACK_FRAME_OVERHEAD
633	ENABLE_INTS
634	bl	.altivec_unavailable_exception
635	b	.ret_from_except
636
637	.align	7
638	.globl vsx_unavailable_common
639vsx_unavailable_common:
640	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
641#ifdef CONFIG_VSX
642BEGIN_FTR_SECTION
643	bne	.load_up_vsx		/* from user: lazily load VSX state */
6441:
645END_FTR_SECTION_IFSET(CPU_FTR_VSX)
646#endif
647	bl	.save_nvgprs
648	addi	r3,r1,STACK_FRAME_OVERHEAD
649	ENABLE_INTS
650	bl	.vsx_unavailable_exception
651	b	.ret_from_except
652
	/* End of the handlers that must live in the first 32k (see the
	 * LOAD_HANDLER comment above the common handlers). */
653	.align	7
654	.globl	__end_handlers
655__end_handlers:
656
657/*
658 * Return from an exception with minimal checks.
659 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
660 * If interrupts have been enabled, or anything has been
661 * done that might have changed the scheduling status of
662 * any task or sent any task a signal, you should use
663 * ret_from_except or ret_from_except_lite instead of this.
664 */
665fast_exc_return_irq:			/* restores irq state too */
666	ld	r3,SOFTE(r1)
667	TRACE_AND_RESTORE_IRQ(r3);
668	ld	r12,_MSR(r1)
669	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
670	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
671	b	1f			/* join the common restore path below */
672
673	.globl	fast_exception_return
674fast_exception_return:
675	ld	r12,_MSR(r1)
6761:	ld	r11,_NIP(r1)
677	andi.	r3,r12,MSR_RI		/* check if RI is set */
678	beq-	unrecov_fer
679
680#ifdef CONFIG_VIRT_CPU_ACCOUNTING
681	andi.	r3,r12,MSR_PR		/* only account on return to user */
682	beq	2f
683	ACCOUNT_CPU_USER_EXIT(r3, r4)
6842:
685#endif
686
	/* restore special-purpose state, then GPRs, from the frame */
687	ld	r3,_CCR(r1)
688	ld	r4,_LINK(r1)
689	ld	r5,_CTR(r1)
690	ld	r6,_XER(r1)
691	mtcr	r3
692	mtlr	r4
693	mtctr	r5
694	mtxer	r6
695	REST_GPR(0, r1)
696	REST_8GPRS(2, r1)
697
698	mfmsr	r10
699	rldicl	r10,r10,48,1		/* clear EE */
700	rldicr	r10,r10,16,61		/* clear RI (LE is 0 already) */
701	mtmsrd	r10,1			/* EE/RI off: no interrupts until rfid */
702
703	mtspr	SPRN_SRR1,r12
704	mtspr	SPRN_SRR0,r11
705	REST_4GPRS(10, r1)
706	ld	r1,GPR1(r1)		/* restore the interrupted stack pointer */
707	rfid
708	b	.	/* prevent speculative execution */
709
710unrecov_fer:
711	bl	.save_nvgprs
7121:	addi	r3,r1,STACK_FRAME_OVERHEAD
713	bl	.unrecoverable_exception
714	b	1b
715
716
717/*
718 * Hash table stuff
719 *
720 * In: r3 = faulting address, r4 = access flags (DSISR-style),
721 *     r5 = trap number, r12 = saved SRR1.  Reached from the DSI/ISI
722 *     common handlers above.
719 */
720	.align	7
721_STATIC(do_hash_page)
722	std	r3,_DAR(r1)
723	std	r4,_DSISR(r1)
724
725	andis.	r0,r4,0xa450		/* weird error? */
726	bne-	handle_page_fault	/* if not, try to insert a HPTE */
727BEGIN_FTR_SECTION
728	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
729	bne-	do_ste_alloc		/* If so handle it */
730END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
731
732	/*
733	 * On iSeries, we soft-disable interrupts here, then
734	 * hard-enable interrupts so that the hash_page code can spin on
735	 * the hash_table_lock without problems on a shared processor.
736	 */
737	DISABLE_INTS
738
739	/*
740	 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
741	 * and will clobber volatile registers when irq tracing is enabled
742	 * so we need to reload them. It may be possible to be smarter here
743	 * and move the irq tracing elsewhere but let's keep it simple for
744	 * now
745	 */
746#ifdef CONFIG_TRACE_IRQFLAGS
747	ld	r3,_DAR(r1)
748	ld	r4,_DSISR(r1)
749	ld	r5,_TRAP(r1)
750	ld	r12,_MSR(r1)
751	clrrdi	r5,r5,4
752#endif /* CONFIG_TRACE_IRQFLAGS */
753	/*
754	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
755	 * accessing a userspace segment (even from the kernel). We assume
756	 * kernel addresses always have the high bit set.
757	 */
758	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
759	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
760	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
761	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
762	ori	r4,r4,1			/* add _PAGE_PRESENT */
763	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
764
765	/*
766	 * r3 contains the faulting address
767	 * r4 contains the required access permissions
768	 * r5 contains the trap number
769	 *
770	 * at return r3 = 0 for success
771	 */
772	bl	.hash_page		/* build HPTE if possible */
773	cmpdi	r3,0			/* see if hash_page succeeded */
774
775BEGIN_FW_FTR_SECTION
776	/*
777	 * If we had interrupts soft-enabled at the point where the
778	 * DSI/ISI occurred, and an interrupt came in during hash_page,
779	 * handle it now.
780	 * We jump to ret_from_except_lite rather than fast_exception_return
781	 * because ret_from_except_lite will check for and handle pending
782	 * interrupts if necessary.
783	 */
784	beq	13f
785END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
786
787BEGIN_FW_FTR_SECTION
788	/*
789	 * Here we have interrupts hard-disabled, so it is sufficient
790	 * to restore paca->{soft,hard}_enable and get out.
791	 */
792	beq	fast_exc_return_irq	/* Return from exception on success */
793END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
794
795	/* For a hash failure, we don't bother re-enabling interrupts */
796	ble-	12f
797
798	/*
799	 * hash_page couldn't handle it, set soft interrupt enable back
800	 * to what it was before the trap.  Note that .raw_local_irq_restore
801	 * handles any interrupts pending at this point.
802	 */
803	ld	r3,SOFTE(r1)
804	TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
805	bl	.raw_local_irq_restore
806	b	11f
807
808/* Here we have a page fault that hash_page can't handle. */
809handle_page_fault:
810	ENABLE_INTS
81111:	ld	r4,_DAR(r1)
812	ld	r5,_DSISR(r1)
813	addi	r3,r1,STACK_FRAME_OVERHEAD
814	bl	.do_page_fault
815	cmpdi	r3,0
816	beq+	13f			/* do_page_fault handled it */
817	bl	.save_nvgprs
818	mr	r5,r3			/* r5 = do_page_fault error code */
819	addi	r3,r1,STACK_FRAME_OVERHEAD
820	lwz	r4,_DAR(r1)
821	bl	.bad_page_fault
822	b	.ret_from_except
823
82413:	b	.ret_from_except_lite
825
826/* We have a page fault that hash_page could handle but HV refused
827 * the PTE insertion
828 */
82912:	bl	.save_nvgprs
830	mr	r5,r3			/* r5 = hash_page failure code */
831	addi	r3,r1,STACK_FRAME_OVERHEAD
832	ld	r4,_DAR(r1)
833	bl	.low_hash_fault
834	b	.ret_from_except
835
836	/* here we have a segment miss */
837do_ste_alloc:
838	bl	.ste_allocate		/* try to insert stab entry */
839	cmpdi	r3,0
840	bne-	handle_page_fault	/* couldn't: fall back to a page fault */
841	b	fast_exception_return
842
843/*
844 * r13 points to the PACA, r9 contains the saved CR,
845 * r11 and r12 contain the saved SRR0 and SRR1.
846 * r9 - r13 are saved in paca->exslb.
847 * We assume we aren't going to take any exceptions during this procedure.
848 * We assume (DAR >> 60) == 0xc.
849 */
850	.align	7
851_GLOBAL(do_stab_bolted)
852	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
853	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
854
855	/* Hash to the primary group */
856	ld	r10,PACASTABVIRT(r13)
857	mfspr	r11,SPRN_DAR
858	srdi	r11,r11,28		/* r11 = ESID of the faulting address */
859	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
860
861	/* Calculate VSID */
862	/* This is a kernel address, so protovsid = ESID */
863	ASM_VSID_SCRAMBLE(r11, r9, 256M)
864	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
865
866	/* Search the primary group for a free entry */
8671:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
868	andi.	r11,r11,0x80
869	beq	2f		/* found a free slot */
870	addi	r10,r10,16	/* next ste (16 bytes each) */
871	andi.	r11,r10,0x70	/* still within the 8-entry group? */
872	bne	1b
873
874	/* Stick for only searching the primary group for now.		*/
875	/* At least for now, we use a very simple random castout scheme */
876	/* Use the TB as a random number ;  OR in 1 to avoid entry 0	*/
877	mftb	r11
878	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
879	ori	r11,r11,0x10
880
881	/* r10 currently points to an ste one past the group of interest */
882	/* make it point to the randomly selected entry			*/
883	subi	r10,r10,128
884	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/
885
886	isync			/* mark the entry invalid		*/
887	ld	r11,0(r10)
888	rldicl	r11,r11,56,1	/* clear the valid bit */
889	rotldi	r11,r11,8
890	std	r11,0(r10)
891	sync
892
893	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
894	slbie	r11		/* flush any cached translation for it */
895
8962:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
897	eieio
898
899	mfspr	r11,SPRN_DAR		/* Get the new esid			*/
900	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
901	ori	r11,r11,0x90	/* Turn on valid and kp			*/
902	std	r11,0(r10)	/* Put new entry back into the stab	*/
903
904	sync
905
906	/* All done -- return from exception. */
907	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
908	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
909
910	andi.	r10,r12,MSR_RI
911	beq-	unrecov_slb
912
913	mtcrf	0x80,r9			/* restore CR */
914
915	mfmsr	r10
916	clrrdi	r10,r10,2		/* clear RI before touching SRR0/1 */
917	mtmsrd	r10,1
918
919	mtspr	SPRN_SRR0,r11
920	mtspr	SPRN_SRR1,r12
921	ld	r9,PACA_EXSLB+EX_R9(r13)
922	ld	r10,PACA_EXSLB+EX_R10(r13)
923	ld	r11,PACA_EXSLB+EX_R11(r13)
924	ld	r12,PACA_EXSLB+EX_R12(r13)
925	ld	r13,PACA_EXSLB+EX_R13(r13)	/* r13 restored last */
926	rfid
927	b	.	/* prevent speculative execution */
928
929/*
930 * Space for CPU0's segment table.
931 *
932 * On iSeries, the hypervisor must fill in at least one entry before
933 * we get control (with relocate on).  The address is given to the hv
934 * as a page number (see xLparMap below), so this must be at a
935 * fixed address (the linker can't compute (u64)&initial_stab >>
936 * PAGE_SHIFT).
937 */
938	. = STAB0_OFFSET	/* 0x6000 */
939	.globl initial_stab
940initial_stab:
941	.space	4096
942
943#ifdef CONFIG_PPC_PSERIES
944/*
945 * Data area reserved for FWNMI option.
946 * This address (0x7000) is fixed by the RPA.
947 */
948	.= 0x7000
949	.globl fwnmi_data_area
950fwnmi_data_area:
951#endif /* CONFIG_PPC_PSERIES */
952
953	/* iSeries does not use the FWNMI stuff, so it is safe to put
954	 * this here, even if we later allow kernels that will boot on
955	 * both pSeries and iSeries */
956#ifdef CONFIG_PPC_ISERIES
957        . = LPARMAP_PHYS
	/* Map description handed to the iSeries hypervisor at boot */
958	.globl xLparMap
959xLparMap:
960	.quad	HvEsidsToMap		/* xNumberEsids */
961	.quad	HvRangesToMap		/* xNumberRanges */
962	.quad	STAB0_PAGE		/* xSegmentTableOffs */
963	.zero	40			/* xRsvd */
964	/* xEsids (HvEsidsToMap entries of 2 quads) */
965	.quad	PAGE_OFFSET_ESID	/* xKernelEsid */
966	.quad	PAGE_OFFSET_VSID	/* xKernelVsid */
967	.quad	VMALLOC_START_ESID	/* xKernelEsid */
968	.quad	VMALLOC_START_VSID	/* xKernelVsid */
969	/* xRanges (HvRangesToMap entries of 3 quads) */
970	.quad	HvPagesToMap		/* xPages */
971	.quad	0			/* xOffset */
972	.quad	PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT)	/* xVPN */
973
974#endif /* CONFIG_PPC_ISERIES */
975
976#ifdef CONFIG_PPC_PSERIES
977        . = 0x8000
978#endif /* CONFIG_PPC_PSERIES */
979