/* xref: /linux/arch/powerpc/kernel/interrupt_64.S (revision 38fe0e0156c037c060f81fe4e36549fae760322d) */
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/head-64.h>
#include <asm/hw_irq.h>
#include <asm/kup.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>
#include <asm/tm.h>

	/* TOC entries for the 64-bit (and compat) system call tables. */
	.section	".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

#ifdef CONFIG_COMPAT
COMPAT_SYS_CALL_TABLE:
	.tc compat_sys_call_table[TC],compat_sys_call_table
#endif
	.previous

	.align 7
28
/*
 * DEBUG_SRR_VALID srr|hsrr
 *
 * With CONFIG_PPC_RFI_SRR_DEBUG, verify that SRR0/SRR1 (argument "srr")
 * or HSRR0/HSRR1 (any other argument) still match the regs->nip and
 * regs->msr values saved on the stack frame at r1.  A mismatch raises a
 * conditional trap whose bug-table entry reports a once-only warning.
 * Clobbers r11 and r12.  Compiles to nothing without the config option.
 */
.macro DEBUG_SRR_VALID srr
#ifdef CONFIG_PPC_RFI_SRR_DEBUG
	.ifc \srr,srr
	mfspr	r11,SPRN_SRR0
	ld	r12,_NIP(r1)
100:	tdne	r11,r12			/* trap (warn once) if SRR0 != regs->nip */
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_SRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12			/* trap (warn once) if SRR1 != regs->msr */
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.else
	mfspr	r11,SPRN_HSRR0
	ld	r12,_NIP(r1)
100:	tdne	r11,r12			/* trap (warn once) if HSRR0 != regs->nip */
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_HSRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12			/* trap (warn once) if HSRR1 != regs->msr */
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.endif
#endif
.endm
52
#ifdef CONFIG_PPC_BOOK3S
/*
 * system_call_vectored name trapnr
 *
 * Entry for the 'scv' (system call vectored) instruction.
 *   \name:   symbol suffix for this instantiation (common, sigill)
 *   \trapnr: value stored into pt_regs->trap
 *
 * Register state on entry, as arranged by the scv entry vector:
 *   r11 = return NIP, r12 = MSR, r13 = PACA,
 *   r9 = original user r13 (stored to GPR13 below), r1 = user stack.
 */
.macro system_call_vectored name trapnr
	.globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
	bne	tabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	SCV_INTERRUPT_TO_KERNEL
	mr	r10,r1			/* keep user r1 while switching stacks */
	ld	r1,PACAKSAVE(r13)	/* switch to the kernel stack */
	std	r10,0(r1)		/* back chain to the user stack */
	std	r11,_NIP(r1)		/* saved return address */
	std	r12,_MSR(r1)		/* saved user MSR */
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
	ld	r2,PACATOC(r13)		/* kernel TOC for the C handler */
	mfcr	r12
	li	r11,0
	/* Can we avoid saving r3-r8 in common case? */
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	/* Zero r9-r12, this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)		/* r9 holds the original user r13 */
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_LINK(r1)
	std	r11,_CTR(r1)

	li	r11,\trapnr		/* identifies this entry flavour to C code */
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	addi	r10,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r10)		/* "regshere" marker */

BEGIN_FTR_SECTION
	HMT_MEDIUM
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
	 * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
	 * and interrupts may be masked and pending already.
	 * system_call_exception() will call trace_hardirqs_off() which means
	 * interrupts could already have been blocked before trace_hardirqs_off,
	 * but this is the best we can do.
	 */

	/* Calling convention has r9 = orig r0, r10 = regs */
	mr	r9,r0
	bl	system_call_exception

.Lsyscall_vectored_\name\()_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,1 /* scv */
	bl	syscall_exit_prepare	/* returns r3 != 0 if full restore needed */
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Lsyscall_vectored_\name\()_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_vectored_\name\()_restart	/* irq work recorded: redo exit */
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	cmpdi	r3,0			/* nonzero: take the full-restore path */
	bne	.Lsyscall_vectored_\name\()_restore_regs

	/* rfscv returns with LR->NIA and CTR->MSR */
	mtlr	r4
	mtctr	r5

	/* Could zero these as per ABI, but we may consider a stricter ABI
	 * which preserves these if libc implementations can benefit, so
	 * restore them for now until further measurement is done. */
	ld	r0,GPR0(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	/* Zero volatile regs that may contain sensitive kernel data */
	li	r9,0
	li	r10,0
	li	r11,0
	li	r12,0
	/* NOTE(review): XER receives the restored user r0 here, not zero —
	 * confirm this is intended (XER is volatile across scv). */
	mtspr	SPRN_XER,r0

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r13,GPR13(r1)
	ld	r1,GPR1(r1)
	RFSCV_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_vectored_\name\()_restore_regs:
	/* Full restore: return via rfid, so load SRR0/1 explicitly */
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5

	ld	r3,_CTR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_XER(r1)

	REST_NVGPRS(r1)
	ld	r0,GPR0(r1)
	mtcr	r2
	mtctr	r3
	mtlr	r4
	mtspr	SPRN_XER,r5
	REST_10GPRS(2, r1)
	REST_2GPRS(12, r1)
	ld	r1,GPR1(r1)
	RFI_TO_USER
.Lsyscall_vectored_\name\()_rst_end:

syscall_vectored_\name\()_restart:
_ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
	/*
	 * Irq work was recorded in PACAIRQHAPPENED while exiting: re-run the
	 * exit path with the soft mask re-disabled.
	 */
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_vectored_\name\()_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)

.endm
217
/* Standard scv entry, trap number 0x3000. */
system_call_vectored common 0x3000

/*
 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
 * which is tested by system_call_exception when r0 is -1 (as set by vector
 * entry code).
 */
system_call_vectored sigill 0x7ff0
226
227
/*
 * Entered via kernel return set up by kernel/sstep.c, must match entry regs
 *
 * Emulated scv: force the soft-mask state the real scv entry vector would
 * have set up, then join the common scv entry path.
 */
	.globl system_call_vectored_emulate
system_call_vectored_emulate:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	b	system_call_vectored_common
#endif /* CONFIG_PPC_BOOK3S */
238
	/*
	 * system_call_common_real: loads the kernel MSR from the PACA into
	 * the MSR and falls through to system_call_common.  Presumably used
	 * when entered with a non-kernel MSR (e.g. MMU off) — confirm with
	 * the callers in the exception vectors.
	 */
	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common_real
system_call_common_real:
_ASM_NOKPROBE_SYMBOL(system_call_common_real)
	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	mtmsrd	r10
245
	/*
	 * Handler for the 'sc' system call instruction.
	 *
	 * Register state on entry, as arranged by the syscall entry vector:
	 *   r11 = return NIP, r12 = MSR, r13 = PACA, r0 = syscall number,
	 *   r9 = original user r13 (stored to GPR13 below), r1 = user stack.
	 */
	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
	bne	tabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	mr	r10,r1			/* keep user r1 while switching stacks */
	ld	r1,PACAKSAVE(r13)	/* switch to the kernel stack */
	std	r10,0(r1)		/* back chain to the user stack */
	std	r11,_NIP(r1)		/* saved return address */
	std	r12,_MSR(r1)		/* saved user MSR */
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	ld	r2,PACATOC(r13)		/* kernel TOC for the C handler */
	mfcr	r12
	li	r11,0
	/* Can we avoid saving r3-r8 in common case? */
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	/* Zero r9-r12, this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)		/* r9 holds the original user r13 */
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	mflr	r10

	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r12,r11,28,(63-28)
	li	r11,0xc00		/* trap number of the 'sc' interrupt */
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	addi	r10,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r10)		/* "regshere" marker */

#ifdef CONFIG_PPC_BOOK3S
	li	r11,1
	stb	r11,PACASRR_VALID(r13)	/* SRR0/1 currently match regs->nip/msr */
#endif

	/*
	 * We always enter kernel from userspace with irq soft-mask enabled and
	 * nothing pending. system_call_exception() will call
	 * trace_hardirqs_off().
	 */
	li	r11,IRQS_ALL_DISABLED
	li	r12,-1 /* Set MSR_EE and MSR_RI */
	stb	r11,PACAIRQSOFTMASK(r13)
	mtmsrd	r12,1

	/* Calling convention has r9 = orig r0, r10 = regs */
	mr	r9,r0
	bl	system_call_exception

.Lsyscall_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,0 /* !scv */
	bl	syscall_exit_prepare	/* returns r3 != 0 if full restore needed */
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Lsyscall_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_restart		/* irq work recorded: redo exit */
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r6,_LINK(r1)
	mtlr	r6

#ifdef CONFIG_PPC_BOOK3S
	/*
	 * Skip reloading SRR0/1 when the PACA flag says they still match
	 * regs->nip/msr.
	 * NOTE(review): unlike .Lfast_user_interrupt_return_* in
	 * interrupt_return_macro, the flag is left set on the branch-taken
	 * path and the store below only executes when r4 is already zero —
	 * confirm this asymmetry is intended.
	 */
	lbz	r4,PACASRR_VALID(r13)
	cmpdi	r4,0
	bne	1f
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
#endif
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5
1:
	DEBUG_SRR_VALID srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	cmpdi	r3,0			/* nonzero: take the full-restore path */
	bne	.Lsyscall_restore_regs
	/* Zero volatile regs that may contain sensitive kernel data */
	li	r0,0
	li	r4,0
	li	r5,0
	li	r6,0
	li	r7,0
	li	r8,0
	li	r9,0
	li	r10,0
	li	r11,0
	li	r12,0
	mtctr	r0
	mtspr	SPRN_XER,r0
.Lsyscall_restore_regs_cont:

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r13,GPR13(r1)
	ld	r1,GPR1(r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_restore_regs:
	/* Full restore: reload CTR/XER and all GPRs including NVGPRs */
	ld	r3,_CTR(r1)
	ld	r4,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r3
	mtspr	SPRN_XER,r4
	ld	r0,GPR0(r1)
	REST_8GPRS(4, r1)
	ld	r12,GPR12(r1)
	b	.Lsyscall_restore_regs_cont
.Lsyscall_rst_end:

#ifdef CONFIG_PPC_BOOK3S
syscall_restart:
_ASM_NOKPROBE_SYMBOL(syscall_restart)
	/*
	 * Irq work was recorded in PACAIRQHAPPENED while exiting: re-run the
	 * exit path with the soft mask re-disabled.
	 */
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif
423
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Reached from the syscall entry paths when a transaction is active:
 * doom the transaction and return straight to userspace, where execution
 * resumes from the checkpointed state at the aborted tbegin.
 * Entered before r11/r12 are clobbered, so r11 = NIP and r12 = MSR as at
 * syscall entry; they are used for SRR0/SRR1 below.
 */
tabort_syscall:
_ASM_NOKPROBE_SYMBOL(tabort_syscall)
	/* Firstly we need to enable TM in the kernel */
	mfmsr	r10
	li	r9, 1
	rldimi	r10, r9, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r10, 0

	/* tabort, this dooms the transaction, nothing else */
	li	r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
	TABORT(R9)

	/*
	 * Return directly to userspace. We have corrupted user register state,
	 * but userspace will never see that register state. Execution will
	 * resume after the tbegin of the aborted transaction with the
	 * checkpointed register state.
	 */
	li	r9, MSR_RI
	andc	r10, r10, r9		/* clear MSR_RI for the return */
	mtmsrd	r10, 1
	mtspr	SPRN_SRR0, r11
	mtspr	SPRN_SRR1, r12
	RFI_TO_USER
	b	.	/* prevent speculative execution */
#endif
451
	/*
	 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
	 * touched, no exit work created, then this can be used.
	 *
	 * Dispatches to the .Lfast_user/.Lfast_kernel_interrupt_return_srr
	 * labels generated by "interrupt_return_macro srr" below, based on
	 * the MSR_PR bit of the saved MSR.
	 */
	.balign IFETCH_ALIGN_BYTES
	.globl fast_interrupt_return_srr
fast_interrupt_return_srr:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
	kuap_check_amr r3, r4
	ld	r5,_MSR(r1)
	andi.	r0,r5,MSR_PR		/* returning to userspace? */
#ifdef CONFIG_PPC_BOOK3S
	beq	1f
	kuap_user_restore r3, r4
	b	.Lfast_user_interrupt_return_srr
1:	kuap_kernel_restore r3, r4
	andi.	r0,r5,MSR_RI		/* interrupt recoverable? */
	li	r3,0 /* 0 return value, no EMULATE_STACK_STORE */
	bne+	.Lfast_kernel_interrupt_return_srr
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	. /* should not get here */
#else
	bne	.Lfast_user_interrupt_return_srr
	b	.Lfast_kernel_interrupt_return_srr
#endif
478
/*
 * interrupt_return_macro srr|hsrr
 *
 * Generates interrupt_return_(h)srr with separate user and kernel return
 * paths.  On Book3S each path has a restart sequence, registered via
 * SOFT_MASK_TABLE/RESTART_TABLE, that re-runs the exit when irq work was
 * recorded in PACAIRQHAPPENED while returning.  The .Lfast_* labels are
 * also entered from fast_interrupt_return_srr above, with r3 already set.
 */
.macro interrupt_return_macro srr
	.balign IFETCH_ALIGN_BYTES
	.globl interrupt_return_\srr
interrupt_return_\srr\():
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
	ld	r4,_MSR(r1)
	andi.	r0,r4,MSR_PR		/* MSR_PR clear: interrupted the kernel */
	beq	interrupt_return_\srr\()_kernel
interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_user_prepare
	cmpdi	r3,0
	bne-	.Lrestore_nvgprs_\srr	/* r3 != 0: NVGPRs must be reloaded */
.Lrestore_nvgprs_\srr\()_cont:
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Linterrupt_return_\srr\()_user_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_user_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

.Lfast_user_interrupt_return_\srr\():
#ifdef CONFIG_PPC_BOOK3S
	/* Reload (H)SRR0/1 only if no longer valid; the flag is consumed
	 * (cleared) on both paths by the stb below. */
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0			/* li does not disturb CR0 */
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	lbz	r4,PACAIRQSOFTMASK(r13)
	tdnei	r4,IRQS_ENABLED		/* trap if irqs are not soft-enabled here */
#endif

BEGIN_FTR_SECTION
	ld	r10,_PPR(r1)
	mtspr	SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	li	r0,0

	REST_4GPRS(7, r1)
	REST_2GPRS(11, r1)
	REST_GPR(13, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_4GPRS(2, r1)
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_USER
	.else
	HRFI_TO_USER
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_user_rst_end:

.Lrestore_nvgprs_\srr\():
	REST_NVGPRS(r1)
	b	.Lrestore_nvgprs_\srr\()_cont

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_user_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
	/* Irq work recorded while exiting: redo the user exit, soft-masked */
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	interrupt_exit_user_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_user_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
#endif

	.balign IFETCH_ALIGN_BYTES
interrupt_return_\srr\()_kernel:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_kernel_prepare

	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Linterrupt_return_\srr\()_kernel_rst_start:
	ld	r11,SOFTE(r1)
	cmpwi	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)	/* restore soft-mask state from regs */
	bne	1f
#ifdef CONFIG_PPC_BOOK3S
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_kernel_restart
#endif
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
1:

.Lfast_kernel_interrupt_return_\srr\():
	cmpdi	cr1,r3,0	/* cr1: r3 != 0 requests the stack-store emulation below */
#ifdef CONFIG_PPC_BOOK3S
	/* Same (H)SRR valid-flag consume logic as the user path above */
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_LINK(r1)
	ld	r4,_CTR(r1)
	ld	r5,_XER(r1)
	ld	r6,_CCR(r1)
	li	r0,0

	REST_4GPRS(7, r1)
	REST_2GPRS(11, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception_marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	std	r0,STACK_FRAME_OVERHEAD-16(r1)

	REST_4GPRS(2, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered.  Use
	 * PACA_EXGEN as temporary storage to hold the store data, as
	 * interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
	std	r9,PACA_EXGEN+0(r13)
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	std	r9,0(r1) /* perform store component of stdu */
	ld	r9,PACA_EXGEN+0(r13)

	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_kernel_rst_end:

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_kernel_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
	/* Irq work recorded while exiting: redo the kernel exit, soft-masked */
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	interrupt_exit_kernel_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_kernel_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
#endif

.endm
736
/* Instantiate the SRR return path; the HSRR variant is Book3S-only. */
interrupt_return_macro srr
#ifdef CONFIG_PPC_BOOK3S
interrupt_return_macro hsrr

	.globl __end_soft_masked
__end_soft_masked:
DEFINE_FIXED_SYMBOL(__end_soft_masked)
#endif /* CONFIG_PPC_BOOK3S */
745
#ifdef CONFIG_PPC_BOOK3S
/*
 * Return point for a child forked from an scv syscall: run
 * schedule_tail(), give the child a 0 return value, and leave through
 * the scv exit path.
 */
_GLOBAL(ret_from_fork_scv)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_vectored_common_exit
#endif

/*
 * Return point for a child forked from a normal 'sc' syscall: as above,
 * but leaves through the sc exit path.
 */
_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_exit

/*
 * First-run entry for a kernel thread: call the thread function in r14
 * with the argument from r15, then exit through the syscall return path
 * with a 0 result.
 */
_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
	mtctr	r14
	mr	r3,r15
#ifdef PPC64_ELF_ABI_v2
	mr	r12,r14		/* ELFv2 global entry expects r12 = entry address */
#endif
	bctrl
	li	r3,0
	b	.Lsyscall_exit
771