xref: /linux/arch/s390/kernel/entry.S (revision 364eeb79a213fcf9164208b53764223ad522d6b3)
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 *    S390 low-level entry points.
4 *
5 *    Copyright IBM Corp. 1999, 2012
6 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 *		 Hartmut Penner (hp@de.ibm.com),
8 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
9 */
10
11#include <linux/export.h>
12#include <linux/init.h>
13#include <linux/linkage.h>
14#include <asm/asm-extable.h>
15#include <asm/alternative.h>
16#include <asm/processor.h>
17#include <asm/cache.h>
18#include <asm/dwarf.h>
19#include <asm/errno.h>
20#include <asm/ptrace.h>
21#include <asm/thread_info.h>
22#include <asm/asm-offsets.h>
23#include <asm/unistd.h>
24#include <asm/page.h>
25#include <asm/sigp.h>
26#include <asm/irq.h>
27#include <asm/fpu-insn.h>
28#include <asm/setup.h>
29#include <asm/nmi.h>
30#include <asm/nospec-insn.h>
31#include <asm/lowcore.h>
32
33_LPP_OFFSET	= __LC_LPP	# offset of the LPP save field in the lowcore
34
35	.macro STBEAR address
	# Store the breaking-event-address register to \address.  Patched in
	# only when facility 193 is installed; otherwise remains a nop.
36	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", ALT_FACILITY(193)
37	.endm
38
39	.macro LBEAR address
	# Load the breaking-event-address register from \address (facility 193
	# only; otherwise a nop).  Counterpart of STBEAR.
40	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", ALT_FACILITY(193)
41	.endm
42
43	.macro LPSWEY address, lpswe
	# Load the return PSW and leave the handler.  Three variants:
	#  - default: branch to the classic lpswe sequence at \lpswe
	#  - facility 193: use the LPSWEY instruction directly on \address
	#  - relocated lowcore (ALT_LOWCORE): LPSWEY on the alternative
	#    lowcore address
44	ALTERNATIVE_2 "b \lpswe;nopr", \
45		".insn siy,0xeb0000000071,\address,0", ALT_FACILITY(193),		\
46		__stringify(.insn siy,0xeb0000000071,LOWCORE_ALT_ADDRESS+\address,0),	\
47		ALT_LOWCORE
48	.endm
49
50	.macro MBEAR reg, lowcore
	# Copy the last breaking-event address from the lowcore into the
	# pt_regs addressed by \reg (facility 193 only; else a 6-byte nop).
51	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK(\lowcore)),\
52		ALT_FACILITY(193)
53	.endm
54
55	.macro	CHECK_STACK savearea, lowcore
	# Branch to stack_overflow if %r15 has run into the stack guard area.
	# %r14 is preloaded with the lowcore save-area address so the overflow
	# handler can recover the saved registers.  No-op without
	# CONFIG_CHECK_STACK.
56#ifdef CONFIG_CHECK_STACK
57	tml	%r15,THREAD_SIZE - CONFIG_STACK_GUARD
58	la	%r14,\savearea(\lowcore)
59	jz	stack_overflow
60#endif
61	.endm
62
63	.macro	CHECK_VMAP_STACK savearea, lowcore, oklabel
	# Verify that %r15 lies on one of the known per-cpu kernel stacks.
	# %r14 is masked down to the candidate stack base (+STACK_INIT_OFFSET)
	# and compared against each stack pointer stored in the lowcore; on a
	# match control continues at \oklabel, otherwise fall through to
	# stack_overflow with %r14 = save-area address.  Without
	# CONFIG_VMAP_STACK the check is skipped entirely.
64#ifdef CONFIG_VMAP_STACK
65	lgr	%r14,%r15
66	nill	%r14,0x10000 - THREAD_SIZE
67	oill	%r14,STACK_INIT_OFFSET
68	clg	%r14,__LC_KERNEL_STACK(\lowcore)
69	je	\oklabel
70	clg	%r14,__LC_ASYNC_STACK(\lowcore)
71	je	\oklabel
72	clg	%r14,__LC_MCCK_STACK(\lowcore)
73	je	\oklabel
74	clg	%r14,__LC_NODAT_STACK(\lowcore)
75	je	\oklabel
76	clg	%r14,__LC_RESTART_STACK(\lowcore)
77	je	\oklabel
78	la	%r14,\savearea(\lowcore)
79	j	stack_overflow
80#else
81	j	\oklabel
82#endif
83	.endm
84
85	/*
86	 * The TSTMSK macro generates a test-under-mask instruction by
87	 * calculating the memory offset for the specified mask value.
88	 * Mask value can be any constant.  The macro shifts the mask
89	 * value to calculate the memory offset for the test-under-mask
90	 * instruction.
91	 */
92	.macro TSTMSK addr, mask, size=8, bytepos=0
		# Recurse from the least significant end of \mask, shifting right
		# one byte per step, until the single non-zero mask byte remains;
		# then emit one tm on the corresponding byte of the field.
93		.if (\bytepos < \size) && (\mask >> 8)
94			.if (\mask & 0xff)
95				.error "Mask exceeds byte boundary"
96			.endif
97			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
98			.exitm
99		.endif
100		.ifeq \mask
101			.error "Mask must not be zero"
102		.endif
		# off = byte offset of the tested byte within the \size-byte field
103		off = \size - \bytepos - 1
104		tm	off+\addr, \mask
105	.endm
106
107	.macro BPOFF
	# Disable branch prediction (spectre mitigation insn; nop unless the
	# ALT_SPEC(82) alternative is active).
108	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", ALT_SPEC(82)
109	.endm
110
111	.macro BPON
	# Re-enable branch prediction; counterpart of BPOFF (same insn with a
	# different function code, nop unless ALT_SPEC(82) is active).
112	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)
113	.endm
114
115	.macro BPENTER tif_ptr,tif_mask
	# On kernel entry: re-enable branch prediction unless the task has the
	# \tif_mask bit (isolate-BP) set at \tif_ptr.
116	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
117		    "j .+12; nop; nop", ALT_SPEC(82)
118	.endm
119
120	.macro BPEXIT tif_ptr,tif_mask
	# On exit (to guest): if the \tif_mask bit is set, disable branch
	# prediction; in the alternative variant, enable it when clear.
121	TSTMSK	\tif_ptr,\tif_mask
122	ALTERNATIVE "jz .+8;  .insn rrf,0xb2e80000,0,0,12,0", \
123		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)
124	.endm
125
126#if IS_ENABLED(CONFIG_KVM)
127	.macro SIEEXIT sie_control,lowcore
	# Leave SIE context: clear the in-SIE bit of the control block,
	# switch back to the kernel address space, clear the per-thread SIE
	# marker, and leave %r9 pointing at sie_exit for a following branch.
128	lg	%r9,\sie_control			# get control block pointer
129	ni	__SIE_PROG0C+3(%r9),0xfe		# no longer in SIE
130	lctlg	%c1,%c1,__LC_KERNEL_ASCE(\lowcore)	# load primary asce
131	lg	%r9,__LC_CURRENT(\lowcore)
132	mvi	__TI_sie(%r9),0				# clear thread's SIE marker
133	larl	%r9,sie_exit			# skip forward to sie_exit
134	.endm
135#endif
136
137	.macro STACKLEAK_ERASE
	# Erase the used portion of the kernel stack before returning to user
	# space (only with the GCC stackleak plugin enabled).
138#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
139	brasl	%r14,stackleak_erase_on_task_stack
140#endif
141	.endm
142
143	GEN_BR_THUNK %r14	# emit the expoline thunk used by BR_EX %r14
144
145	.section .kprobes.text, "ax"
146.Ldummy:
147	/*
148	 * The following nop exists only in order to avoid that the next
149	 * symbol starts at the beginning of the kprobes text section.
150	 * In that case there would be several symbols at the same address.
151	 * E.g. objdump would take an arbitrary symbol when disassembling
152	 * the code.
153	 * With the added nop in between this cannot happen.
154	 */
155	nop	0
156
157/*
158 * Scheduler resume function, called by __switch_to
159 *  gpr2 = (task_struct *)prev
160 *  gpr3 = (task_struct *)next
161 * Returns:
162 *  gpr2 = prev
163 */
164SYM_FUNC_START(__switch_to_asm)
165	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
166	lghi	%r4,__TASK_stack
167	lghi	%r1,__TASK_thread
168	llill	%r5,STACK_INIT_OFFSET
169	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
170	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
171	agr	%r15,%r5			# end of kernel stack of next
172	GET_LC	%r13				# %r13 = lowcore address
173	stg	%r3,__LC_CURRENT(%r13)		# store task struct of next
174	stg	%r15,__LC_KERNEL_STACK(%r13)	# store end of kernel stack
175	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
176	aghi	%r3,__TASK_pid
177	mvc	__LC_CURRENT_PID(4,%r13),0(%r3)	# store pid of next
	# reload the LPP value from the lowcore if facility 40 is installed
178	ALTERNATIVE "nop", "lpp _LPP_OFFSET(%r13)", ALT_FACILITY(40)
179	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
180	BR_EX	%r14				# expoline-safe return
181SYM_FUNC_END(__switch_to_asm)
182
183#if IS_ENABLED(CONFIG_KVM)
184/*
185 * __sie64a calling convention:
186 * %r2 pointer to sie control block phys
187 * %r3 pointer to sie control block virt
188 * %r4 guest register save area
189 * %r5 guest asce
190 */
191SYM_FUNC_START(__sie64a)
192	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
193	GET_LC	%r13				# %r13 = lowcore address
194	lg	%r14,__LC_CURRENT(%r13)		# %r14 = current task
195	stg	%r2,__SF_SIE_CONTROL_PHYS(%r15)	# save sie block physical..
196	stg	%r3,__SF_SIE_CONTROL(%r15)	# ...and virtual addresses
197	stg	%r4,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
198	stg	%r5,__SF_SIE_GUEST_ASCE(%r15)	# save guest asce
199	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
200	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r14) # copy thread flags
201	lmg	%r0,%r13,0(%r4)			# load guest gprs 0-13
202	mvi	__TI_sie(%r14),1		# mark current as inside SIE
203	lctlg	%c1,%c1,__SF_SIE_GUEST_ASCE(%r15) # load primary asce
204	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
205	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
206	tm	__SIE_PROG20+3(%r14),3		# last exit...
207	jnz	.Lsie_skip
208	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15)	# get sie block phys addr
209	BPEXIT	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
210.Lsie_entry:
211	sie	0(%r14)				# enter the guest
212# Let the next instruction be NOP to avoid triggering a machine check
213# and handling it in a guest as result of the instruction execution.
214	nopr	7
215.Lsie_leave:
216	BPOFF
217	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
218.Lsie_skip:
219	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
220	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
221	GET_LC	%r14
222	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r14)	# load primary asce
223	lg	%r14,__LC_CURRENT(%r14)
224	mvi	__TI_sie(%r14),0		# clear in-SIE marker
225SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
226	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
227	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
228	xgr	%r0,%r0				# clear guest registers to
229	xgr	%r1,%r1				# prevent speculative use
230	xgr	%r3,%r3
231	xgr	%r4,%r4
232	xgr	%r5,%r5
233	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
234	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
235	BR_EX	%r14				# expoline-safe return
236SYM_FUNC_END(__sie64a)
237EXPORT_SYMBOL(__sie64a)
238EXPORT_SYMBOL(sie_exit)
239#endif
240
241/*
242 * SVC interrupt handler routine. System calls are synchronous events and
243 * are entered with interrupts disabled.
244 */
245
246SYM_CODE_START(system_call)
247	STMG_LC	%r8,%r15,__LC_SAVE_AREA		# save gprs 8-15 in the lowcore
248	GET_LC	%r13				# %r13 = lowcore address
249	stpt	__LC_SYS_ENTER_TIMER(%r13)	# snapshot CPU timer on entry
250	BPOFF
	# %r14 becomes the 2nd argument for __do_syscall; it is set to 1
	# instead when entering via .Lpgm_svcper (single-stepped svc).
251	lghi	%r14,0
252.Lsysc_per:
253	STBEAR	__LC_LAST_BREAK(%r13)
254	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)	# switch to kernel address space
255	lg	%r15,__LC_KERNEL_STACK(%r13)
256	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
257	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
258	# clear user controlled register to prevent speculative use
259	xgr	%r0,%r0
260	xgr	%r1,%r1
261	xgr	%r4,%r4
262	xgr	%r5,%r5
263	xgr	%r6,%r6
264	xgr	%r7,%r7
265	xgr	%r8,%r8
266	xgr	%r9,%r9
267	xgr	%r10,%r10
268	xgr	%r11,%r11
269	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
270	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA(%r13)
271	MBEAR	%r2,%r13			# copy last break into pt_regs
272	lgr	%r3,%r14			# 2nd arg: 1 if via .Lpgm_svcper
273	brasl	%r14,__do_syscall
274	STACKLEAK_ERASE
275	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)	# back to user address space
276	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
277	BPON
278	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
279	stpt	__LC_EXIT_TIMER(%r13)		# snapshot CPU timer on exit
280	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
281	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
282SYM_CODE_END(system_call)
283
284#
285# a new process exits the kernel with ret_from_fork
286#
287SYM_CODE_START(ret_from_fork)
288	lgr	%r3,%r11			# pass %r11 as 2nd arg to __ret_from_fork
289	brasl	%r14,__ret_from_fork
290	STACKLEAK_ERASE
291	GET_LC	%r13				# %r13 = lowcore address
292	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)	# switch to user address space
293	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
294	BPON
295	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
296	stpt	__LC_EXIT_TIMER(%r13)		# snapshot CPU timer on exit
297	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)	# restore user regs
298	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
299SYM_CODE_END(ret_from_fork)
300
301/*
302 * Program check handler routine
303 */
304
305SYM_CODE_START(pgm_check_handler)
306	STMG_LC	%r8,%r15,__LC_SAVE_AREA		# save gprs 8-15 in the lowcore
307	GET_LC	%r13				# %r13 = lowcore address
308	stpt	__LC_SYS_ENTER_TIMER(%r13)
309	BPOFF
310	lmg	%r8,%r9,__LC_PGM_OLD_PSW(%r13)	# %r8/%r9 = old PSW mask/address
311	xgr	%r10,%r10			# %r10 = pt_regs flags, default 0
312	tmhh	%r8,0x0001		# coming from user space?
313	jno	.Lpgm_skip_asce
314	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
315	j	3f			# -> fault in user space
316.Lpgm_skip_asce:
317#if IS_ENABLED(CONFIG_KVM)
	# fault happened while running a KVM guest in SIE -> leave SIE and
	# tag the pt_regs as a guest fault
318	lg	%r11,__LC_CURRENT(%r13)
319	tm	__TI_sie(%r11),0xff		# currently in SIE?
320	jz	1f
321	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
322	SIEEXIT __SF_SIE_CONTROL(%r15),%r13
323	lghi	%r10,_PIF_GUEST_FAULT
324#endif
3251:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
326	jnz	2f			# -> enabled, can't be a double fault
327	tm	__LC_PGM_ILC+3(%r13),0x80	# check for per exception
328	jnz	.Lpgm_svcper		# -> single stepped svc
3292:	CHECK_STACK __LC_SAVE_AREA,%r13
330	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
331	# CHECK_VMAP_STACK branches to stack_overflow or 4f
332	CHECK_VMAP_STACK __LC_SAVE_AREA,%r13,4f
3333:	lg	%r15,__LC_KERNEL_STACK(%r13)	# user fault: fresh kernel stack
3344:	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# %r11 = pt_regs
335	stg	%r10,__PT_FLAGS(%r11)
336	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
337	stmg	%r0,%r7,__PT_R0(%r11)
338	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
339	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK(%r13)
340	stmg	%r8,%r9,__PT_PSW(%r11)
341	# clear user controlled registers to prevent speculative use
342	xgr	%r0,%r0
343	xgr	%r1,%r1
344	xgr	%r3,%r3
345	xgr	%r4,%r4
346	xgr	%r5,%r5
347	xgr	%r6,%r6
348	xgr	%r7,%r7
349	xgr	%r12,%r12
350	lgr	%r2,%r11			# pass pointer to pt_regs
351	brasl	%r14,__do_pgm_check
352	tmhh	%r8,0x0001		# returning to user space?
353	jno	.Lpgm_exit_kernel
354	STACKLEAK_ERASE
355	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)	# back to user address space
356	BPON
357	stpt	__LC_EXIT_TIMER(%r13)
358.Lpgm_exit_kernel:
359	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
360	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
361	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
362	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
363
364#
365# single stepped system call
366#
367.Lpgm_svcper:
	# Re-dispatch as a system call: build a return PSW pointing at
	# .Lsysc_per with %r14 = 1 marking the PER/single-step case.
368	mvc	__LC_RETURN_PSW(8,%r13),__LC_SVC_NEW_PSW(%r13)
369	larl	%r14,.Lsysc_per
370	stg	%r14,__LC_RETURN_PSW+8(%r13)
371	lghi	%r14,1				# flag consumed at .Lsysc_per
372	LBEAR	__LC_PGM_LAST_BREAK(%r13)
373	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
374SYM_CODE_END(pgm_check_handler)
375
376/*
377 * Interrupt handler macro used for external and IO interrupts.
378 */
379.macro INT_HANDLER name,lc_old_psw,handler
	# \name       - symbol name of the generated handler
	# \lc_old_psw - lowcore offset of the interruption old PSW
	# \handler    - C function called with pt_regs in %r2
380SYM_CODE_START(\name)
381	STMG_LC	%r8,%r15,__LC_SAVE_AREA
382	GET_LC	%r13
383	stckf	__LC_INT_CLOCK(%r13)		# record interrupt clock
384	stpt	__LC_SYS_ENTER_TIMER(%r13)
385	STBEAR	__LC_LAST_BREAK(%r13)
386	BPOFF
387	lmg	%r8,%r9,\lc_old_psw(%r13)	# %r8/%r9 = old PSW mask/address
388	tmhh	%r8,0x0001			# interrupting from user ?
389	jnz	1f
390#if IS_ENABLED(CONFIG_KVM)
	# interrupted a KVM guest in SIE -> leave SIE first
391	lg	%r10,__LC_CURRENT(%r13)
392	tm	__TI_sie(%r10),0xff
393	jz	0f
394	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
395	SIEEXIT __SF_SIE_CONTROL(%r15),%r13
396#endif
3970:	CHECK_STACK __LC_SAVE_AREA,%r13
398	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
399	j	2f
4001:	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)	# from user: kernel asce + stack
401	lg	%r15,__LC_KERNEL_STACK(%r13)
4022:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
403	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# %r11 = pt_regs
404	stmg	%r0,%r7,__PT_R0(%r11)
405	# clear user controlled registers to prevent speculative use
406	xgr	%r0,%r0
407	xgr	%r1,%r1
408	xgr	%r3,%r3
409	xgr	%r4,%r4
410	xgr	%r5,%r5
411	xgr	%r6,%r6
412	xgr	%r7,%r7
413	xgr	%r10,%r10
414	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
415	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
416	MBEAR	%r11,%r13			# copy last break into pt_regs
417	stmg	%r8,%r9,__PT_PSW(%r11)
418	lgr	%r2,%r11		# pass pointer to pt_regs
419	brasl	%r14,\handler
420	mvc	__LC_RETURN_PSW(16,%r13),__PT_PSW(%r11)
421	tmhh	%r8,0x0001		# returning to user ?
422	jno	2f
423	STACKLEAK_ERASE
424	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)
425	BPON
426	stpt	__LC_EXIT_TIMER(%r13)
4272:	LBEAR	__PT_LAST_BREAK(%r11)
428	lmg	%r0,%r15,__PT_R0(%r11)
429	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
430SYM_CODE_END(\name)
431.endm
432
433INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq	# external interrupts
434INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq	# I/O interrupts
435
436/*
437 * Machine check handler routines
438 */
439SYM_CODE_START(mcck_int_handler)
440	BPOFF
441	GET_LC	%r13				# %r13 = lowcore address
442	lmg	%r8,%r9,__LC_MCK_OLD_PSW(%r13)	# %r8/%r9 = old PSW mask/address
443	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_SYSTEM_DAMAGE
444	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
445	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_CR_VALID
446	jno	.Lmcck_panic		# control registers invalid -> panic
447	ptlb					# purge TLB
448	lay	%r14,__LC_CPU_TIMER_SAVE_AREA(%r13)
449	mvc	__LC_MCCK_ENTER_TIMER(8,%r13),0(%r14)
450	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_CPU_TIMER_VALID
451	jo	3f
	# CPU timer invalid: pick a plausible replacement from the saved
	# entry / exit / last-update timer values and reload it
452	la	%r14,__LC_SYS_ENTER_TIMER(%r13)
453	clc	0(8,%r14),__LC_EXIT_TIMER(%r13)
454	jl	1f
455	la	%r14,__LC_EXIT_TIMER(%r13)
4561:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER(%r13)
457	jl	2f
458	la	%r14,__LC_LAST_UPDATE_TIMER(%r13)
4592:	spt	0(%r14)				# reload CPU timer
460	mvc	__LC_MCCK_ENTER_TIMER(8,%r13),0(%r14)
4613:	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_PSW_MWP_VALID
462	jno	.Lmcck_panic
463	tmhh	%r8,0x0001		# interrupting from user ?
464	jnz	.Lmcck_user
465	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_PSW_IA_VALID
466	jno	.Lmcck_panic
467#if IS_ENABLED(CONFIG_KVM)
468	lg	%r10,__LC_CURRENT(%r13)
469	tm	__TI_sie(%r10),0xff
470	jz	.Lmcck_user
471	# Need to compare the address instead of __TI_SIE flag.
472	# Otherwise there would be a race between setting the flag
473	# and entering SIE (or leaving and clearing the flag). This
474	# would cause machine checks targeted at the guest to be
475	# handled by the host.
476	larl	%r14,.Lsie_entry
477	clgrjl	%r9,%r14, 4f
478	larl	%r14,.Lsie_leave
479	clgrjhe	%r9,%r14, 4f
	# old PSW address within [.Lsie_entry, .Lsie_leave): the mcck hit
	# the guest -> record _CIF_MCCK_GUEST in the pcpu flags
480	lg	%r10,__LC_PCPU
481	oi	__PCPU_FLAGS+7(%r10), _CIF_MCCK_GUEST
4824:	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
483	SIEEXIT __SF_SIE_CONTROL(%r15),%r13
484#endif
485.Lmcck_user:
486	lg	%r15,__LC_MCCK_STACK(%r13)	# switch to the mcck stack
487	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# %r11 = pt_regs
488	stctg	%c1,%c1,__PT_CR1(%r11)		# save interrupted asce
489	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
490	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
491	lay	%r14,__LC_GPREGS_SAVE_AREA(%r13)
492	mvc	__PT_R0(128,%r11),0(%r14)	# copy saved gprs 0-15
493	# clear user controlled registers to prevent speculative use
494	xgr	%r0,%r0
495	xgr	%r1,%r1
496	xgr	%r3,%r3
497	xgr	%r4,%r4
498	xgr	%r5,%r5
499	xgr	%r6,%r6
500	xgr	%r7,%r7
501	xgr	%r10,%r10
502	stmg	%r8,%r9,__PT_PSW(%r11)
503	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	# NOTE(review): __SF_BACKCHAIN was already cleared above; this second
	# xc looks redundant - confirm before touching it.
504	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
505	lgr	%r2,%r11		# pass pointer to pt_regs
506	brasl	%r14,s390_do_machine_check
507	lctlg	%c1,%c1,__PT_CR1(%r11)		# restore interrupted asce
508	lmg	%r0,%r10,__PT_R0(%r11)
509	mvc	__LC_RETURN_MCCK_PSW(16,%r13),__PT_PSW(%r11) # move return PSW
510	tm	__LC_RETURN_MCCK_PSW+1(%r13),0x01 # returning to user ?
511	jno	0f
512	BPON
513	stpt	__LC_EXIT_TIMER(%r13)
5140:	ALTERNATIVE "brcl 0,0", __stringify(lay %r12,__LC_LAST_BREAK_SAVE_AREA(%r13)),\
515		ALT_FACILITY(193)
516	LBEAR	0(%r12)
517	lmg	%r11,%r15,__PT_R11(%r11)
518	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE
519
520.Lmcck_panic:
521	/*
522	 * Iterate over all possible CPU addresses in the range 0..0xffff
523	 * and stop each CPU using signal processor. Use compare and swap
524	 * to allow just one CPU-stopper and prevent concurrent CPUs from
525	 * stopping each other while leaving the others running.
526	 */
527	lhi	%r5,0
528	lhi	%r6,1
529	larl	%r7,stop_lock
530	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
531	jnz	4f
532	larl	%r7,this_cpu
533	stap	0(%r7)			# this CPU address
534	lh	%r4,0(%r7)
535	nilh	%r4,0
536	lhi	%r0,1
537	sll	%r0,16			# CPU counter
538	lhi	%r3,0			# next CPU address
5390:	cr	%r3,%r4
540	je	2f			# skip stopping ourselves for now
5411:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
542	brc	SIGP_CC_BUSY,1b
5432:	ahi	%r3,1
544	brct	%r0,0b
5453:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
546	brc	SIGP_CC_BUSY,3b
5474:	j	4b			# halt forever
548SYM_CODE_END(mcck_int_handler)
549
550SYM_CODE_START(restart_int_handler)
	# Restart interrupt: runs DAT-off initially; load a DAT-on PSW, build
	# a pt_regs on the restart stack and call the function stored in the
	# lowcore restart area (fn, data, source cpu).
551	ALTERNATIVE "nop", "lpp _LPP_OFFSET", ALT_FACILITY(40)
552	stg	%r15,__LC_SAVE_AREA_RESTART
553	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
554	jz	0f
555	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA	# restore all control registers
5560:	larl	%r15,daton_psw
557	lpswe	0(%r15)				# turn dat on, keep irqs off
558.Ldaton:
559	GET_LC	%r15
560	lg	%r15,__LC_RESTART_STACK(%r15)	# switch to the restart stack
561	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
562	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
563	GET_LC	%r13
564	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART(%r13)
565	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW(%r13)
566	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
567	lg	%r1,__LC_RESTART_FN(%r13)	# load fn, parm & source cpu
568	lg	%r2,__LC_RESTART_DATA(%r13)
569	lgf	%r3,__LC_RESTART_SOURCE(%r13)
570	ltgr	%r3,%r3				# test source cpu address
571	jm	1f				# negative -> skip source stop
5720:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
573	brc	10,0b				# wait for status stored
5741:	basr	%r14,%r1			# call function
575	stap	__SF_EMPTY(%r15)		# store cpu address
576	llgh	%r3,__SF_EMPTY(%r15)
5772:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
578	brc	2,2b
5793:	j	3b				# halt forever
580SYM_CODE_END(restart_int_handler)
581
582	__INIT
# Minimal program-check handler used during early boot (lives in the init
# section): builds a pt_regs on the current stack and calls
# __do_early_pgm_check, then returns via the stored PSW.
583SYM_CODE_START(early_pgm_check_handler)
584	STMG_LC %r8,%r15,__LC_SAVE_AREA		# save gprs 8-15 in the lowcore
585	GET_LC	%r13				# %r13 = lowcore address
586	aghi	%r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
587	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# %r11 = pt_regs
588	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
589	stmg	%r0,%r7,__PT_R0(%r11)
590	mvc	__PT_PSW(16,%r11),__LC_PGM_OLD_PSW(%r13)
591	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
592	lgr	%r2,%r11			# pass pointer to pt_regs
593	brasl	%r14,__do_early_pgm_check
594	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
595	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
596	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
597SYM_CODE_END(early_pgm_check_handler)
598	__FINIT
599
600	.section .kprobes.text, "ax"
601
602#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
603/*
604 * The synchronous or the asynchronous stack overflowed. We are dead.
605 * No need to properly save the registers, we are going to panic anyway.
606 * Setup a pt_regs so that show_trace can provide a good call trace.
607 */
608SYM_CODE_START(stack_overflow)
609	GET_LC	%r15
610	lg	%r15,__LC_NODAT_STACK(%r15) # change to panic stack
611	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# %r11 = pt_regs
612	stmg	%r0,%r7,__PT_R0(%r11)
613	stmg	%r8,%r9,__PT_PSW(%r11)
	# %r14 = lowcore save-area address, preloaded by CHECK_STACK /
	# CHECK_VMAP_STACK before branching here
614	mvc	__PT_R8(64,%r11),0(%r14)
615	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
616	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
617	lgr	%r2,%r11		# pass pointer to pt_regs
618	jg	kernel_stack_overflow
619SYM_CODE_END(stack_overflow)
620#endif
621
622	.section .data, "aw"
623	.balign	4
624SYM_DATA_LOCAL(stop_lock,	.long 0)	# cs-based lock: one CPU-stopper only
625SYM_DATA_LOCAL(this_cpu,	.short 0)	# CPU address of the stopping CPU
626	.balign	8
627SYM_DATA_START_LOCAL(daton_psw)
	# PSW loaded by restart_int_handler to turn DAT on, resuming at .Ldaton
628	.quad	PSW_KERNEL_BITS
629	.quad	.Ldaton
630SYM_DATA_END(daton_psw)
631
632	.section .rodata, "a"
633	.balign	8
# Generate the 64-bit system call table: one .quad per entry from the
# generated syscall table header.
634#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
635SYM_DATA_START(sys_call_table)
636#include "asm/syscall_table.h"
637SYM_DATA_END(sys_call_table)
638#undef SYSCALL
639
640#ifdef CONFIG_COMPAT
641
# 31-bit compat system call table, built from the same header using the
# compat (emu) entry points.
642#define SYSCALL(esame,emu)	.quad __s390_ ## emu
643SYM_DATA_START(sys_call_table_emu)
644#include "asm/syscall_table.h"
645SYM_DATA_END(sys_call_table_emu)
646#undef SYSCALL
647#endif
648
648