/* SPDX-License-Identifier: GPL-2.0 */
/*
 *    S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>

__PT_R0      =	__PT_GPRS
__PT_R1      =	__PT_GPRS + 8
__PT_R2      =	__PT_GPRS + 16
__PT_R3      =	__PT_GPRS + 24
__PT_R4      =	__PT_GPRS + 32
__PT_R5      =	__PT_GPRS + 40
__PT_R6      =	__PT_GPRS + 48
__PT_R7      =	__PT_GPRS + 56
__PT_R8      =	__PT_GPRS + 64
__PT_R9      =	__PT_GPRS + 72
__PT_R10     =	__PT_GPRS + 80
__PT_R11     =	__PT_GPRS + 88
__PT_R12     =	__PT_GPRS + 96
__PT_R13     =	__PT_GPRS + 104
__PT_R14     =	__PT_GPRS + 112
__PT_R15     =	__PT_GPRS + 120

STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
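# For illustration (the actual values depend on the configuration): with
# 4K pages (PAGE_SHIFT = 12) and THREAD_SIZE_ORDER = 2 this gives
# STACK_SHIFT = 14, STACK_SIZE = 16384, and STACK_INIT marks the initial
# stack pointer: top of stack minus one register frame and one pt_regs.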

_LPP_OFFSET	= __LC_LPP

	.macro STBEAR address
	ALTERNATIVE "", ".insn	s,0xb2010000,\address", 193
	.endm

	.macro LBEAR address
	ALTERNATIVE "", ".insn	s,0xb2000000,\address", 193
	.endm

	.macro LPSWEY address,lpswe
	ALTERNATIVE "b \lpswe", ".insn siy,0xeb0000000071,\address,0", 193
	.endm

	.macro MBEAR reg
	ALTERNATIVE "", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
	.endm
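# STBEAR/LBEAR store and load the breaking-event-address register, MBEAR
# copies the lowcore copy of it into pt_regs, and LPSWEY replaces lpswe;
# all four are alternatives keyed to facility 193 (BEAR enhancement), so
# they are no-ops (or a plain lpswe branch) on machines without it.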

	.macro	CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm
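# Worked example, assuming STACK_SIZE = 16384 and CONFIG_STACK_GUARD = 256:
# the tml mask is 0x3f00, so the branch to stack_overflow is taken whenever
# %r15 points into the lowest 256 bytes of the stack (the guard area).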

	.macro	CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - STACK_SIZE
	oill	%r14,STACK_INIT
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_MCCK_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm

	.macro STCK savearea
	ALTERNATIVE ".insn	s,0xb2050000,\savearea", \
		    ".insn	s,0xb27c0000,\savearea", 25
	.endm
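# stck (opcode 0xb205) is patched to stckf (0xb27c) when the
# store-clock-fast facility (25) is installed.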

	/*
	 * The TSTMSK macro generates a test-under-mask instruction for
	 * the byte of the operand that is selected by the mask value,
	 * which may be any constant. The macro shifts the mask byte by
	 * byte to calculate the memory offset for the tm instruction.
	 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
		.if (\bytepos < \size) && (\mask >> 8)
			.if (\mask & 0xff)
				.error "Mask exceeds byte boundary"
			.endif
			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
			.exitm
		.endif
		.ifeq \mask
			.error "Mask must not be zero"
		.endif
		off = \size - \bytepos - 1
		tm	off+\addr, \mask
	.endm
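# For illustration: TSTMSK __LC_CPU_FLAGS,0x0300 recurses once (mask >> 8 = 3,
# bytepos = 1) and finally emits "tm 6+__LC_CPU_FLAGS,3", i.e. it tests the
# correct byte of the 8-byte field. A mask like 0x0180 would stop with the
# "Mask exceeds byte boundary" error because it straddles two bytes.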

	.macro BPOFF
	ALTERNATIVE "", ".long 0xb2e8c000", 82
	.endm

	.macro BPON
	ALTERNATIVE "", ".long 0xb2e8d000", 82
	.endm

	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
		    "", 82
	.endm

	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8;  .long 0xb2e8c000", \
		    "jnz .+8; .long 0xb2e8d000", 82
	.endm
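# The raw opcodes 0xb2e8c000 and 0xb2e8d000 encode PPA (perform processor
# assist) with function codes 12 and 13, which disable and re-enable branch
# prediction; the alternatives are keyed to facility 82, so these macros
# are no-ops on machines without that facility.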

	/*
	 * The CHKSTG macro jumps to the provided label in case the
	 * machine check interruption code reports one of the following
	 * unrecoverable storage errors:
	 * - Storage error uncorrected
	 * - Storage key error uncorrected
	 * - Storage degradation with Failing-storage-address validity
	 */
	.macro CHKSTG errlabel
	TSTMSK	__LC_MCCK_CODE,(MCCK_CODE_STG_ERROR|MCCK_CODE_STG_KEY_ERROR)
	jnz	\errlabel
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_STG_DEGRAD
	jz	.Loklabel\@
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_STG_FAIL_ADDR
	jnz	\errlabel
.Loklabel\@:
	.endm

#if IS_ENABLED(CONFIG_KVM)
	/*
	 * The OUTSIDE macro jumps to the provided label in case the value
	 * in the provided register is outside of the provided range. The
	 * macro is useful for checking whether a PSW stored in a register
	 * pair points inside or outside of a block of instructions.
	 * @reg: register to check
	 * @start: start of the range
	 * @end: end of the range
	 * @outside_label: jump here if @reg is outside of [@start..@end)
	 */
	.macro OUTSIDE reg,start,end,outside_label
	lgr	%r14,\reg
	larl	%r13,\start
	slgr	%r14,%r13
	lghi	%r13,\end - \start
	clgr	%r14,%r13
	jhe	\outside_label
	.endm
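	/*
	 * Note that OUTSIDE clobbers %r13 and %r14. For illustration:
	 * OUTSIDE %r9,.Lsie_gmap,.Lsie_done,1f computes %r9 - .Lsie_gmap
	 * (unsigned) and branches to 1f unless the result is below
	 * .Lsie_done - .Lsie_gmap, i.e. unless the saved PSW address in
	 * %r9 points into the SIE critical section.
	 */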

	.macro SIEEXIT
	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	.endm
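	/*
	 * SIEEXIT relies on %r15 still pointing to the stack frame that
	 * sie64a set up, since it fetches the control block pointer from
	 * __SF_SIE_CONTROL(%r15).
	 */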
#endif

	GEN_BR_THUNK %r14
	GEN_BR_THUNK %r14,%r13

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * This nop exists only to prevent __bpon from starting at the
	 * beginning of the kprobes text section. Otherwise there would be
	 * several symbols at the same address and e.g. objdump would pick
	 * an arbitrary one of them when disassembling this code. With the
	 * added nop in between, the __bpon symbol is unique again.
	 */
	nop	0

ENTRY(__bpon)
	.globl __bpon
	BPON
	BR_EX	%r14
ENDPROC(__bpon)

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
	BR_EX	%r14
ENDPROC(__switch_to)

#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
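/*
 * The corresponding C declaration (arch/s390/include/asm/kvm_host.h) is
 * roughly: extern int sie64a(struct kvm_s390_sie_block *, u64 *);
 * the return value is the exit reason code taken from __SF_SIE_REASON.
 */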
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL(%r15)	# save control block pointer
	stg	%r3,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
	sie	0(%r14)
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
.Lsie_done:
# Some program checks are suppressing, and C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where the ILC is
# unpredictable. Other instructions between sie64a and .Lsie_done should not
# cause program interrupts. So let's use 3 nops as a landing pad for all
# possible rewinds.
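# A rewind of 2, 4 or 6 bytes from sie_exit thus lands on .Lrewind_pad2,
# .Lrewind_pad4 or .Lrewind_pad6; each pad (and sie_exit itself) is covered
# by an EX_TABLE entry below that redirects the fault to .Lsie_fault.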
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(sie64a)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
 */

ENTRY(system_call)
	stpt	__LC_SYS_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lghi	%r14,0
.Lsysc_per:
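	# %r14 is zero for a normal svc; .Lpgm_svcper enters here with
	# %r14 = 1 for a single-stepped svc. The value is passed on to
	# __do_syscall as its second (PER trap) argument.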
	STBEAR	__LC_LAST_BREAK
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r12,__LC_CURRENT
	lg	%r15,__LC_KERNEL_STACK
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r8,%r8
	xgr	%r9,%r9
	xgr	%r10,%r10
	xgr	%r11,%r11
	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
	MBEAR	%r2
	lgr	%r3,%r14
	brasl	%r14,__do_syscall
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(system_call)

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	lgr	%r3,%r11
	brasl	%r14,__ret_from_fork
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(ret_from_fork)

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
	stpt	__LC_SYS_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r12,__LC_CURRENT
	lghi	%r10,0
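	# %r10 collects the pt_regs flags; it stays zero unless the fault
	# happened in the SIE critical section, in which case it is set to
	# _PIF_GUEST_FAULT below and stored to __PT_FLAGS later.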
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# coming from user space?
	jno	.Lpgm_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	j	3f			# -> fault in user space
.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for program checks in sie64a
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,1f
	SIEEXIT
	lghi	%r10,_PIF_GUEST_FAULT
#endif
1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	2f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
2:	CHECK_STACK __LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 4f
	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
3:	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	lg	%r15,__LC_KERNEL_STACK
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stg	%r10,__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
	stmg	%r8,%r9,__PT_PSW(%r11)

	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	lgr	%r2,%r11
	brasl	%r14,__do_pgm_check
	tmhh	%r8,0x0001		# returning to user space?
	jno	.Lpgm_exit_kernel
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
.Lpgm_exit_kernel:
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,1
	LBEAR	__LC_PGM_LAST_BREAK
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
ENDPROC(pgm_check_handler)

/*
 * Interrupt handler macro used for external and IO interrupts.
 */
.macro INT_HANDLER name,lc_old_psw,handler
ENTRY(\name)
	STCK	__LC_INT_CLOCK
	stpt	__LC_SYS_ENTER_TIMER
	STBEAR	__LC_LAST_BREAK
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,\lc_old_psw
	tmhh	%r8,0x0001			# interrupting from user ?
	jnz	1f
#if IS_ENABLED(CONFIG_KVM)
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,0f
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	SIEEXIT
#endif
0:	CHECK_STACK __LC_SAVE_AREA_ASYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r15,__LC_KERNEL_STACK
2:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	MBEAR	%r11
	stmg	%r8,%r9,__PT_PSW(%r11)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,\handler
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tmhh	%r8,0x0001		# returning to user ?
	jno	2f
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
2:	LBEAR	__PT_LAST_BREAK(%r11)
	lmg	%r0,%r15,__PT_R0(%r11)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(\name)
.endm

INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq

/*
 * Load idle PSW.
 */
ENTRY(psw_idle)
	stg	%r14,(__SF_GPRS+8*8)(%r15)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,psw_idle_exit
	stg	%r1,__SF_EMPTY+8(%r15)
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm
	.insn	rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
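	# The .insn above encodes stcctm %r1,5,__MT_CYCLES_ENTER(%r2), which
	# stores the MT cycle counter set; it is spelled out as .insn so
	# that older assemblers without the mnemonic can build this file.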
.Lpsw_idle_stcctm:
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	STCK	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
	lpswe	__SF_EMPTY(%r15)
.globl psw_idle_exit
psw_idle_exit:
	BR_EX	%r14
ENDPROC(psw_idle)

/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
	STCK	__LC_MCCK_CLOCK
	BPOFF
	la	%r1,4095		# validate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
	LBEAR	__LC_LAST_BREAK_SAVE_AREA-4095(%r1)	# validate bear
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	la	%r14,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
	ptlb
	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
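	# The CPU timer counts down, so of the three software save areas the
	# smallest value is the most recently stored one; pick that and
	# reload it via spt to get a sane CPU timer again.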
	la	%r14,__LC_SYS_ENTER_TIMER
	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	6f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,6f
	OUTSIDE	%r9,.Lsie_entry,.Lsie_skip,4f
	oi	__LC_CPU_FLAGS+7,_CIF_MCCK_GUEST
	j	5f
4:	CHKSTG	.Lmcck_panic
5:	larl	%r14,.Lstosm_tmp
	stosm	0(%r14),0x04		# turn dat on, keep irqs off
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	SIEEXIT
	j	.Lmcck_stack
#endif
6:	CHKSTG	.Lmcck_panic
	larl	%r14,.Lstosm_tmp
	stosm	0(%r14),0x04		# turn dat on, keep irqs off
	tmhh	%r8,0x0001		# interrupting from user ?
	jz	.Lmcck_stack
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
.Lmcck_stack:
	lg	%r15,__LC_MCCK_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stctg	%c1,%c1,__PT_CR1(%r11)
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	cghi	%r2,0
	je	.Lmcck_return
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	brasl	%r14,s390_handle_mcck
.Lmcck_return:
	lctlg	%c1,%c1,__PT_CR1(%r11)
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
0:	ALTERNATIVE "", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
	LBEAR	0(%r12)
	lmg	%r11,%r15,__PT_R11(%r11)
	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	/*
	 * Iterate over all possible CPU addresses in the range 0..0xffff
	 * and stop each CPU using signal processor. Use compare and swap
	 * to allow just one CPU-stopper and prevent concurrent CPUs from
	 * stopping each other while leaving the others running.
	 */
	lhi	%r5,0
	lhi	%r6,1
	larl	%r7,.Lstop_lock
	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
	jnz	4f
	larl	%r7,.Lthis_cpu
	stap	0(%r7)			# this CPU address
	lh	%r4,0(%r7)
	nilh	%r4,0
	lhi	%r0,1
	sll	%r0,16			# CPU counter
	lhi	%r3,0			# next CPU address
0:	cr	%r3,%r4
	je	2f
1:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
	brc	SIGP_CC_BUSY,1b
2:	ahi	%r3,1
	brct	%r0,0b
3:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
	brc	SIGP_CC_BUSY,3b
4:	j	4b
ENDPROC(mcck_int_handler)

ENTRY(restart_int_handler)
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
	stg	%r15,__LC_SAVE_AREA_RESTART
	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
	jz	0f
	la	%r15,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)
0:	larl	%r15,.Lstosm_tmp
	stosm	0(%r15),0x04			# turn dat on, keep irqs off
	lg	%r15,__LC_RESTART_STACK
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lgf	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
ENDPROC(restart_int_handler)

	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
ENTRY(stack_overflow)
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
ENDPROC(stack_overflow)
#endif

	.section .data, "aw"
		.align	4
.Lstop_lock:	.long	0
.Lthis_cpu:	.short	0
.Lstosm_tmp:	.byte	0
	.section .rodata, "a"
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
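/*
 * For illustration: an entry such as SYSCALL(sys_read,compat_sys_read) in
 * asm/syscall_table.h expands to ".quad __s390x_sys_read" here and to
 * ".quad __s390_compat_sys_read" in the compat table below; the __s390x_
 * and __s390_ prefixes come from the s390 syscall wrapper infrastructure.
 */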
	.globl	sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.quad __s390_ ## emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif