xref: /linux/arch/s390/kernel/entry.S (revision eb7cca1faf9883d7b4da792281147dbedc449238)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *    S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-extable.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/fpu-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/nospec-insn.h>

_LPP_OFFSET	= __LC_LPP

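	/*
	 * The macros below wrap instructions that are patched in via
	 * ALTERNATIVE when facility 193 is installed (presumably the
	 * BEAR-enhancement facility; the name is an assumption, the
	 * facility number is what the code relies on):
	 *  STBEAR/LBEAR - store/load the breaking-event-address register
	 *                 to/from the given location
	 *  LPSWEY       - load the return PSW with a long displacement;
	 *                 without the facility it branches to the lpswe
	 *                 fallback code passed in \lpswe
	 *  MBEAR        - copy the last breaking-event address from the
	 *                 lowcore into the pt_regs area addressed by \reg
	 */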
	.macro STBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", 193
	.endm

	.macro LBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", 193
	.endm

	.macro LPSWEY address,lpswe
	ALTERNATIVE "b \lpswe; nopr", ".insn siy,0xeb0000000071,\address,0", 193
	.endm

	.macro MBEAR reg
	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
	.endm

	.macro	CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,THREAD_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm
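
	/*
	 * Sketch of how CHECK_STACK detects an overflow, assuming (for
	 * illustration) THREAD_SIZE = 0x4000 and CONFIG_STACK_GUARD = 0x100:
	 * the tml mask is then 0x3f00, and since kernel stacks are
	 * THREAD_SIZE aligned, %r15 & 0x3f00 == 0 can only happen when the
	 * stack pointer has dropped into the guard area at the bottom of
	 * the stack, so the jz branches to stack_overflow with the save
	 * area offset in %r14.
	 */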

	.macro	CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - THREAD_SIZE
	oill	%r14,STACK_INIT_OFFSET
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_MCCK_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm

	/*
	 * The TSTMSK macro generates a test-under-mask instruction by
	 * calculating the memory offset for the specified mask value.
	 * Mask value can be any constant.  The macro shifts the mask
	 * value to calculate the memory offset for the test-under-mask
	 * instruction.
	 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
		.if (\bytepos < \size) && (\mask >> 8)
			.if (\mask & 0xff)
				.error "Mask exceeds byte boundary"
			.endif
			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
			.exitm
		.endif
		.ifeq \mask
			.error "Mask must not be zero"
		.endif
		off = \size - \bytepos - 1
		tm	off+\addr, \mask
	.endm
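
	/*
	 * Worked example: TSTMSK addr,0x0400 with the default size of 8
	 * recurses once (0x0400 >> 8 = 0x04, bytepos 1) and then emits
	 *	tm	6+addr,0x04
	 * because bit 0x0400 of the 8 byte big-endian field lives in byte
	 * 6 counting from the leftmost byte. A mask that straddles a byte
	 * boundary, e.g. 0x0180, is rejected with "Mask exceeds byte
	 * boundary".
	 */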

	.macro BPOFF
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", 82
	.endm

	.macro BPON
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", 82
	.endm

	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
		    "j .+12; nop; nop", 82
	.endm

	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8;  .insn rrf,0xb2e80000,0,0,12,0", \
		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82
	.endm

#if IS_ENABLED(CONFIG_KVM)
	/*
	 * The OUTSIDE macro jumps to the provided label if the value
	 * in the provided register is outside of the provided range. The
	 * macro is useful for checking whether a PSW stored in a register
	 * pair points inside or outside of a block of instructions.
	 * @reg: register to check
	 * @start: start of the range
	 * @end: end of the range
	 * @outside_label: jump here if @reg is outside of [@start..@end)
	 */
	.macro OUTSIDE reg,start,end,outside_label
	lgr	%r14,\reg
	larl	%r13,\start
	slgr	%r14,%r13
	clgfrl	%r14,.Lrange_size\@
	jhe	\outside_label
	.section .rodata, "a"
	.balign 4
.Lrange_size\@:
	.long	\end - \start
	.previous
	.endm
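
	/*
	 * Typical use (see the program check, interrupt and machine check
	 * handlers below): with the old PSW address in %r9,
	 *	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,1f
	 * branches to 1f unless %r9 points into the SIE critical section
	 * between .Lsie_gmap and .Lsie_done, i.e. unless the exception hit
	 * while entering, running or leaving the guest.
	 */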

	.macro SIEEXIT
	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	.endm
#endif

	.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	brasl	%r14,stackleak_erase_on_task_stack
#endif
	.endm

	GEN_BR_THUNK %r14

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * The following nop exists only to avoid having the next symbol
	 * start at the beginning of the kprobes text section. Otherwise
	 * several symbols would share the same address, and e.g. objdump
	 * would pick an arbitrary one of them when disassembling the code.
	 * With the nop in between this cannot happen.
	 */
	nop	0

/*
 * Scheduler resume function, called by __switch_to
 *  gpr2 = (task_struct *)prev
 *  gpr3 = (task_struct *)next
 * Returns:
 *  gpr2 = prev
 */
SYM_FUNC_START(__switch_to_asm)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT_OFFSET
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
	BR_EX	%r14
SYM_FUNC_END(__switch_to_asm)
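
/*
 * Illustrative only: given the register convention documented above, the
 * C side would declare this roughly as
 *	struct task_struct *__switch_to_asm(struct task_struct *prev,
 *					    struct task_struct *next);
 * (a sketch based on the gpr2/gpr3 convention above, not copied from the
 * headers).
 */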

#if IS_ENABLED(CONFIG_KVM)
/*
 * __sie64a calling convention:
 * %r2 pointer to sie control block phys
 * %r3 pointer to sie control block virt
 * %r4 guest register save area
 */
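/*
 * Illustrative note: the reason code returned in %r2 comes from
 * __SF_SIE_REASON, which is cleared on entry below and set to -EFAULT by
 * .Lsie_fault when a program check hits the exit path. A roughly matching
 * C declaration (a sketch from the convention above, not taken from the
 * headers) would be
 *	int __sie64a(phys_addr_t sie_block_phys,
 *		     struct kvm_s390_sie_block *sie_block, u64 *guest_gprs);
 */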
SYM_FUNC_START(__sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL_PHYS(%r15)	# save sie block physical..
	stg	%r3,__SF_SIE_CONTROL(%r15)	# ...and virtual addresses
	stg	%r4,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	lmg	%r0,%r13,0(%r4)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15)	# get sie block phys addr
	BPEXIT	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_entry:
	sie	0(%r14)
# Let the next instruction be NOP to avoid triggering a machine check
# and handling it in a guest as a result of the instruction execution.
	nopr	7
.Lsie_leave:
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_skip:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
.Lsie_done:
# Some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between __sie64a and .Lsie_done should not cause program
# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
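# Each nopr below is 2 bytes, so .Lrewind_pad6/.Lrewind_pad4/.Lrewind_pad2
# sit 6, 4 and 2 bytes before sie_exit; a PSW rewound by an ILC of 6, 4 or
# 2 therefore lands on one of the pads, and the EX_TABLE entries below let
# the fixup code redirect execution to .Lsie_fault.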
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
SYM_FUNC_END(__sie64a)
EXPORT_SYMBOL(__sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
 */

SYM_CODE_START(system_call)
	stpt	__LC_SYS_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lghi	%r14,0
.Lsysc_per:
	STBEAR	__LC_LAST_BREAK
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r15,__LC_KERNEL_STACK
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r8,%r8
	xgr	%r9,%r9
	xgr	%r10,%r10
	xgr	%r11,%r11
	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
	MBEAR	%r2
	lgr	%r3,%r14
	brasl	%r14,__do_syscall
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPON
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(system_call)
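
#
# Note on the C interface, sketched from the code above: __do_syscall is
# entered with %r2 pointing to the pt_regs area on the kernel stack and
# %r3 holding the flag loaded into %r14 at the top, i.e. 0 for a regular
# svc and 1 when system_call was re-entered via .Lpgm_svcper further down
# (single stepped system call).
#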

#
# a new process exits the kernel with ret_from_fork
#
SYM_CODE_START(ret_from_fork)
	lgr	%r3,%r11
	brasl	%r14,__ret_from_fork
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPON
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(ret_from_fork)

/*
 * Program check handler routine
 */

SYM_CODE_START(pgm_check_handler)
	stpt	__LC_SYS_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lghi	%r10,0
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# coming from user space?
	jno	.Lpgm_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	j	3f			# -> fault in user space
.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
	# clean up the critical section for program checks in __sie64a
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,1f
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT
	lghi	%r10,_PIF_GUEST_FAULT
#endif
1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	2f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
2:	CHECK_STACK __LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 4f
	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
3:	lg	%r15,__LC_KERNEL_STACK
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stg	%r10,__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
	stmg	%r8,%r9,__PT_PSW(%r11)

	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	lgr	%r2,%r11
	brasl	%r14,__do_pgm_check
	tmhh	%r8,0x0001		# returning to user space?
	jno	.Lpgm_exit_kernel
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPON
	stpt	__LC_EXIT_TIMER
.Lpgm_exit_kernel:
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,1
	LBEAR	__LC_PGM_LAST_BREAK
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
SYM_CODE_END(pgm_check_handler)

/*
 * Interrupt handler macro used for external and IO interrupts.
 */
.macro INT_HANDLER name,lc_old_psw,handler
SYM_CODE_START(\name)
	stckf	__LC_INT_CLOCK
	stpt	__LC_SYS_ENTER_TIMER
	STBEAR	__LC_LAST_BREAK
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lmg	%r8,%r9,\lc_old_psw
	tmhh	%r8,0x0001			# interrupting from user ?
	jnz	1f
#if IS_ENABLED(CONFIG_KVM)
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,0f
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT
#endif
0:	CHECK_STACK __LC_SAVE_AREA_ASYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r15,__LC_KERNEL_STACK
2:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	MBEAR	%r11
	stmg	%r8,%r9,__PT_PSW(%r11)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,\handler
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tmhh	%r8,0x0001		# returning to user ?
	jno	2f
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPON
	stpt	__LC_EXIT_TIMER
2:	LBEAR	__PT_LAST_BREAK(%r11)
	lmg	%r0,%r15,__PT_R0(%r11)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(\name)
.endm

INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq

/*
 * Load idle PSW.
 */
SYM_FUNC_START(psw_idle)
	stg	%r14,(__SF_GPRS+8*8)(%r15)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,psw_idle_exit
	stg	%r1,__SF_EMPTY+8(%r15)
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm
	.insn	rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
.Lpsw_idle_stcctm:
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	stckf	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
	lpswe	__SF_EMPTY(%r15)
SYM_INNER_LABEL(psw_idle_exit, SYM_L_GLOBAL)
	BR_EX	%r14
SYM_FUNC_END(psw_idle)
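
/*
 * Illustrative only: judging from the code above, the caller passes
 * %r2 = pointer to the per-cpu idle data (the clock, timer and MT cycle
 * values are stored relative to it) and %r3 = PSW mask for the idle PSW,
 * i.e. a C prototype along the lines of
 *	void psw_idle(struct s390_idle_data *data, unsigned long psw_mask);
 * The idle PSW gets psw_idle_exit as its instruction address, so once the
 * interrupt that ends the wait has been handled execution continues there.
 */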

/*
 * Machine check handler routines
 */
SYM_CODE_START(mcck_int_handler)
	BPOFF
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	ptlb
	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	la	%r14,__LC_SYS_ENTER_TIMER
	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	.Lmcck_user
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,.Lmcck_user
	OUTSIDE	%r9,.Lsie_entry,.Lsie_leave,4f
	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
4:	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT
#endif
.Lmcck_user:
	lg	%r15,__LC_MCCK_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stctg	%c1,%c1,__PT_CR1(%r11)
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	lctlg	%c1,%c1,__PT_CR1(%r11)
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	BPON
	stpt	__LC_EXIT_TIMER
0:	ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
	LBEAR	0(%r12)
	lmg	%r11,%r15,__PT_R11(%r11)
	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	/*
	 * Iterate over all possible CPU addresses in the range 0..0xffff
	 * and stop each CPU using signal processor. Use compare and swap
	 * to allow just one CPU-stopper and prevent concurrent CPUs from
	 * stopping each other while leaving the others running.
	 */
	lhi	%r5,0
	lhi	%r6,1
	larl	%r7,stop_lock
	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
	jnz	4f
	larl	%r7,this_cpu
	stap	0(%r7)			# this CPU address
	lh	%r4,0(%r7)
	nilh	%r4,0
	lhi	%r0,1
	sll	%r0,16			# CPU counter
	lhi	%r3,0			# next CPU address
0:	cr	%r3,%r4
	je	2f
1:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
	brc	SIGP_CC_BUSY,1b
2:	ahi	%r3,1
	brct	%r0,0b
3:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
	brc	SIGP_CC_BUSY,3b
4:	j	4b
SYM_CODE_END(mcck_int_handler)

SYM_CODE_START(restart_int_handler)
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
	stg	%r15,__LC_SAVE_AREA_RESTART
	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
	jz	0f
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA
0:	larl	%r15,daton_psw
	lpswe	0(%r15)				# turn dat on, keep irqs off
.Ldaton:
	lg	%r15,__LC_RESTART_STACK
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lgf	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
SYM_CODE_END(restart_int_handler)

	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
SYM_CODE_START(stack_overflow)
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
SYM_CODE_END(stack_overflow)
#endif

	.section .data, "aw"
	.balign	4
SYM_DATA_LOCAL(stop_lock,	.long 0)
SYM_DATA_LOCAL(this_cpu,	.short 0)
	.balign	8
SYM_DATA_START_LOCAL(daton_psw)
	.quad	PSW_KERNEL_BITS
	.quad	.Ldaton
SYM_DATA_END(daton_psw)

	.section .rodata, "a"
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
SYM_DATA_START(sys_call_table)
#include "asm/syscall_table.h"
SYM_DATA_END(sys_call_table)
#undef SYSCALL
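
/*
 * Each line of the generated asm/syscall_table.h is expected to be a
 * SYSCALL(esame,emu) invocation, so a (hypothetical) entry such as
 *	SYSCALL(sys_read,compat_sys_read)
 * expands to ".quad __s390x_sys_read" in the 64-bit table above and to
 * ".quad __s390_compat_sys_read" in the compat table below.
 */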

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.quad __s390_ ## emu
SYM_DATA_START(sys_call_table_emu)
#include "asm/syscall_table.h"
SYM_DATA_END(sys_call_table_emu)
#undef SYSCALL
#endif