xref: /linux/arch/s390/kernel/entry.S (revision e814f3fd16acfb7f9966773953de8f740a1e3202)
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 *    S390 low-level entry points.
4 *
5 *    Copyright IBM Corp. 1999, 2012
6 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 *		 Hartmut Penner (hp@de.ibm.com),
8 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
9 */
10
11#include <linux/export.h>
12#include <linux/init.h>
13#include <linux/linkage.h>
14#include <asm/asm-extable.h>
15#include <asm/alternative.h>
16#include <asm/processor.h>
17#include <asm/cache.h>
18#include <asm/dwarf.h>
19#include <asm/errno.h>
20#include <asm/ptrace.h>
21#include <asm/thread_info.h>
22#include <asm/asm-offsets.h>
23#include <asm/unistd.h>
24#include <asm/page.h>
25#include <asm/sigp.h>
26#include <asm/irq.h>
27#include <asm/fpu-insn.h>
28#include <asm/setup.h>
29#include <asm/nmi.h>
30#include <asm/nospec-insn.h>
31#include <asm/lowcore.h>
32
_LPP_OFFSET	= __LC_LPP		# offset of the LPP save area in lowcore

	/*
	 * STBEAR - store the breaking-event-address register to \address.
	 * Patched to a nop unless facility 193 (BEAR enhancement) is
	 * installed.
	 */
	.macro STBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", ALT_FACILITY(193)
	.endm

	/*
	 * LBEAR - load the breaking-event-address register from \address.
	 * Patched to a nop unless facility 193 is installed.
	 */
	.macro LBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", ALT_FACILITY(193)
	.endm

	/*
	 * LPSWEY - leave the kernel via the PSW at lowcore offset \address.
	 * Without facility 193 this branches to the \lpswe fallback code;
	 * with a relocated lowcore (ALT_LOWCORE) the alternate lowcore
	 * address is used instead.
	 */
	.macro LPSWEY address, lpswe
	ALTERNATIVE_2 "b \lpswe;nopr", \
		".insn siy,0xeb0000000071,\address,0", ALT_FACILITY(193),		\
		__stringify(.insn siy,0xeb0000000071,LOWCORE_ALT_ADDRESS+\address,0),	\
		ALT_LOWCORE
	.endm

	/*
	 * MBEAR - copy the last-breaking-event address from lowcore into the
	 * pt_regs area addressed by \reg. Patched to a six byte nop
	 * (brcl 0,0) unless facility 193 is installed.
	 */
	.macro MBEAR reg, lowcore
	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK(\lowcore)),\
		ALT_FACILITY(193)
	.endm
54
	/*
	 * CHECK_VMAP_STACK - verify that %r15 points into one of the known
	 * per-cpu kernel stacks. %r15 is rounded down to the stack base and
	 * STACK_INIT_OFFSET is added; the result is compared against the
	 * recorded top of the kernel, async, machine-check, nodat and
	 * restart stacks. On a match execution continues at \oklabel,
	 * otherwise %r14 is loaded with a pointer to \savearea and control
	 * jumps to stack_overflow. Clobbers %r14.
	 */
	.macro	CHECK_VMAP_STACK savearea, lowcore, oklabel
	lgr	%r14,%r15
	nill	%r14,0x10000 - THREAD_SIZE	# round down to stack base
	oill	%r14,STACK_INIT_OFFSET		# -> expected initial stack pointer
	clg	%r14,__LC_KERNEL_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_MCCK_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK(\lowcore)
	je	\oklabel
	la	%r14,\savearea(\lowcore)	# pass register save area to
	j	stack_overflow			# the overflow handler
	.endm
72
73	/*
74	 * The TSTMSK macro generates a test-under-mask instruction by
75	 * calculating the memory offset for the specified mask value.
76	 * Mask value can be any constant.  The macro shifts the mask
77	 * value to calculate the memory offset for the test-under-mask
78	 * instruction.
79	 */
80	.macro TSTMSK addr, mask, size=8, bytepos=0
81		.if (\bytepos < \size) && (\mask >> 8)
82			.if (\mask & 0xff)
83				.error "Mask exceeds byte boundary"
84			.endif
85			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
86			.exitm
87		.endif
88		.ifeq \mask
89			.error "Mask must not be zero"
90		.endif
91		off = \size - \bytepos - 1
92		tm	off+\addr, \mask
93	.endm
94
	/*
	 * BPOFF/BPON - disable/enable branch prediction. The raw .insn is a
	 * PPA (perform-processor-assist) with function code 12 resp. 13;
	 * only emitted when spectre mitigation alternative ALT_SPEC(82)
	 * is active, a nop otherwise.
	 */
	.macro BPOFF
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", ALT_SPEC(82)
	.endm

	.macro BPON
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)
	.endm

	/*
	 * BPENTER - on kernel entry re-enable branch prediction (PPA 13),
	 * unless \tif_mask (e.g. _TIF_ISOLATE_BP_GUEST) is set in \tif_ptr.
	 */
	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
		    "j .+12; nop; nop", ALT_SPEC(82)
	.endm

	/*
	 * BPEXIT - on kernel exit disable branch prediction (PPA 12) if
	 * \tif_mask is set in \tif_ptr, otherwise enable it (PPA 13).
	 */
	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8;  .insn rrf,0xb2e80000,0,0,12,0", \
		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)
	.endm
113
#if IS_ENABLED(CONFIG_KVM)
	/*
	 * SIEEXIT - leave SIE (guest execution) context: clear the "in SIE"
	 * bit in the SIE control block, restore the kernel address space,
	 * clear the current task's __TI_sie marker and point %r9 at
	 * sie_exit so the interrupted context resumes there.
	 * Clobbers %r9.
	 */
	.macro SIEEXIT sie_control,lowcore
	lg	%r9,\sie_control			# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe		# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE(\lowcore)	# load primary asce
	lg	%r9,__LC_CURRENT(\lowcore)
	mvi	__TI_sie(%r9),0
	larl	%r9,sie_exit			# skip forward to sie_exit
	.endm
#endif

	/*
	 * STACKLEAK_ERASE - erase the used portion of the task stack;
	 * expands to nothing unless CONFIG_GCC_PLUGIN_STACKLEAK is set.
	 * Clobbers %r14 (call linkage).
	 */
	.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	brasl	%r14,stackleak_erase_on_task_stack
#endif
	.endm
130
	GEN_BR_THUNK %r14		# expoline thunk used by BR_EX %r14

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * The following nop exists only in order to avoid that the next
	 * symbol starts at the beginning of the kprobes text section.
	 * In that case there would be several symbols at the same address.
	 * E.g. objdump would take an arbitrary symbol when disassembling
	 * the code.
	 * With the added nop in between this cannot happen.
	 */
	nop	0
144
145/*
146 * Scheduler resume function, called by __switch_to
147 *  gpr2 = (task_struct *)prev
148 *  gpr3 = (task_struct *)next
149 * Returns:
150 *  gpr2 = prev
151 */
152SYM_FUNC_START(__switch_to_asm)
153	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
154	lghi	%r4,__TASK_stack
155	lghi	%r1,__TASK_thread
156	llill	%r5,STACK_INIT_OFFSET
157	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
158	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
159	agr	%r15,%r5			# end of kernel stack of next
160	GET_LC	%r13
161	stg	%r3,__LC_CURRENT(%r13)		# store task struct of next
162	stg	%r15,__LC_KERNEL_STACK(%r13)	# store end of kernel stack
163	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
164	aghi	%r3,__TASK_pid
165	mvc	__LC_CURRENT_PID(4,%r13),0(%r3)	# store pid of next
166	ALTERNATIVE "nop", "lpp _LPP_OFFSET(%r13)", ALT_FACILITY(40)
167	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
168	BR_EX	%r14
169SYM_FUNC_END(__switch_to_asm)
170
#if IS_ENABLED(CONFIG_KVM)
/*
 * __sie64a calling convention:
 * %r2 pointer to sie control block phys
 * %r3 pointer to sie control block virt
 * %r4 guest register save area
 * %r5 guest asce
 *
 * Enters guest execution via the SIE instruction and returns the exit
 * reason code in %r2. The guest's gprs 0-13 are loaded from / stored back
 * to the save area at %r4; the SIE parameters are kept in the caller's
 * stack frame so interrupt handlers (SIEEXIT) can find them via %r15.
 */
SYM_FUNC_START(__sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	GET_LC	%r13
	lg	%r14,__LC_CURRENT(%r13)
	stg	%r2,__SF_SIE_CONTROL_PHYS(%r15)	# save sie block physical..
	stg	%r3,__SF_SIE_CONTROL(%r15)	# ...and virtual addresses
	stg	%r4,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	stg	%r5,__SF_SIE_GUEST_ASCE(%r15)	# save guest asce
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r14) # copy thread flags
	lmg	%r0,%r13,0(%r4)			# load guest gprs 0-13
	mvi	__TI_sie(%r14),1		# mark current task as in SIE
	lctlg	%c1,%c1,__SF_SIE_GUEST_ASCE(%r15) # load primary asce
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip			# -> do not re-enter SIE
	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15)	# get sie block phys addr
	BPEXIT	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_entry:
	sie	0(%r14)				# run the guest
# Let the next instruction be NOP to avoid triggering a machine check
# and handling it in a guest as result of the instruction execution.
	nopr	7
.Lsie_leave:
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_skip:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	GET_LC	%r14
	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r14)	# load primary asce
	lg	%r14,__LC_CURRENT(%r14)
	mvi	__TI_sie(%r14),0		# clear "in SIE" marker again
SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
SYM_FUNC_END(__sie64a)
EXPORT_SYMBOL(__sie64a)
EXPORT_SYMBOL(sie_exit)
#endif
228
229/*
230 * SVC interrupt handler routine. System calls are synchronous events and
231 * are entered with interrupts disabled.
232 */
233
234SYM_CODE_START(system_call)
235	STMG_LC	%r8,%r15,__LC_SAVE_AREA
236	GET_LC	%r13
237	stpt	__LC_SYS_ENTER_TIMER(%r13)
238	BPOFF
239	lghi	%r14,0
240.Lsysc_per:
241	STBEAR	__LC_LAST_BREAK(%r13)
242	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
243	lg	%r15,__LC_KERNEL_STACK(%r13)
244	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
245	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
246	# clear user controlled register to prevent speculative use
247	xgr	%r0,%r0
248	xgr	%r1,%r1
249	xgr	%r4,%r4
250	xgr	%r5,%r5
251	xgr	%r6,%r6
252	xgr	%r7,%r7
253	xgr	%r8,%r8
254	xgr	%r9,%r9
255	xgr	%r10,%r10
256	xgr	%r11,%r11
257	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
258	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA(%r13)
259	MBEAR	%r2,%r13
260	lgr	%r3,%r14
261	brasl	%r14,__do_syscall
262	STACKLEAK_ERASE
263	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)
264	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
265	BPON
266	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
267	stpt	__LC_EXIT_TIMER(%r13)
268	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
269	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
270SYM_CODE_END(system_call)
271
272#
273# a new process exits the kernel with ret_from_fork
274#
275SYM_CODE_START(ret_from_fork)
276	lgr	%r3,%r11
277	brasl	%r14,__ret_from_fork
278	STACKLEAK_ERASE
279	GET_LC	%r13
280	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)
281	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
282	BPON
283	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
284	stpt	__LC_EXIT_TIMER(%r13)
285	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
286	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
287SYM_CODE_END(ret_from_fork)
288
289/*
290 * Program check handler routine
291 */
292
293SYM_CODE_START(pgm_check_handler)
294	STMG_LC	%r8,%r15,__LC_SAVE_AREA
295	GET_LC	%r13
296	stpt	__LC_SYS_ENTER_TIMER(%r13)
297	BPOFF
298	lmg	%r8,%r9,__LC_PGM_OLD_PSW(%r13)
299	xgr	%r10,%r10
300	tmhh	%r8,0x0001		# coming from user space?
301	jno	.Lpgm_skip_asce
302	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
303	j	3f			# -> fault in user space
304.Lpgm_skip_asce:
305#if IS_ENABLED(CONFIG_KVM)
306	lg	%r11,__LC_CURRENT(%r13)
307	tm	__TI_sie(%r11),0xff
308	jz	1f
309	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
310	SIEEXIT __SF_SIE_CONTROL(%r15),%r13
311	lghi	%r10,_PIF_GUEST_FAULT
312#endif
3131:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
314	jnz	2f			# -> enabled, can't be a double fault
315	tm	__LC_PGM_ILC+3(%r13),0x80	# check for per exception
316	jnz	.Lpgm_svcper		# -> single stepped svc
3172:	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
318	# CHECK_VMAP_STACK branches to stack_overflow or 4f
319	CHECK_VMAP_STACK __LC_SAVE_AREA,%r13,4f
3203:	lg	%r15,__LC_KERNEL_STACK(%r13)
3214:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
322	stg	%r10,__PT_FLAGS(%r11)
323	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
324	stmg	%r0,%r7,__PT_R0(%r11)
325	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
326	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK(%r13)
327	stmg	%r8,%r9,__PT_PSW(%r11)
328	# clear user controlled registers to prevent speculative use
329	xgr	%r0,%r0
330	xgr	%r1,%r1
331	xgr	%r3,%r3
332	xgr	%r4,%r4
333	xgr	%r5,%r5
334	xgr	%r6,%r6
335	xgr	%r7,%r7
336	xgr	%r12,%r12
337	lgr	%r2,%r11
338	brasl	%r14,__do_pgm_check
339	tmhh	%r8,0x0001		# returning to user space?
340	jno	.Lpgm_exit_kernel
341	STACKLEAK_ERASE
342	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)
343	BPON
344	stpt	__LC_EXIT_TIMER(%r13)
345.Lpgm_exit_kernel:
346	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
347	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
348	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
349	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
350
351#
352# single stepped system call
353#
354.Lpgm_svcper:
355	mvc	__LC_RETURN_PSW(8,%r13),__LC_SVC_NEW_PSW(%r13)
356	larl	%r14,.Lsysc_per
357	stg	%r14,__LC_RETURN_PSW+8(%r13)
358	lghi	%r14,1
359	LBEAR	__LC_PGM_LAST_BREAK(%r13)
360	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
361SYM_CODE_END(pgm_check_handler)
362
363/*
364 * Interrupt handler macro used for external and IO interrupts.
365 */
366.macro INT_HANDLER name,lc_old_psw,handler
367SYM_CODE_START(\name)
368	STMG_LC	%r8,%r15,__LC_SAVE_AREA
369	GET_LC	%r13
370	stckf	__LC_INT_CLOCK(%r13)
371	stpt	__LC_SYS_ENTER_TIMER(%r13)
372	STBEAR	__LC_LAST_BREAK(%r13)
373	BPOFF
374	lmg	%r8,%r9,\lc_old_psw(%r13)
375	tmhh	%r8,0x0001			# interrupting from user ?
376	jnz	1f
377#if IS_ENABLED(CONFIG_KVM)
378	lg	%r10,__LC_CURRENT(%r13)
379	tm	__TI_sie(%r10),0xff
380	jz	0f
381	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
382	SIEEXIT __SF_SIE_CONTROL(%r15),%r13
383#endif
3840:	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
385	j	2f
3861:	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
387	lg	%r15,__LC_KERNEL_STACK(%r13)
3882:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
389	la	%r11,STACK_FRAME_OVERHEAD(%r15)
390	stmg	%r0,%r7,__PT_R0(%r11)
391	# clear user controlled registers to prevent speculative use
392	xgr	%r0,%r0
393	xgr	%r1,%r1
394	xgr	%r3,%r3
395	xgr	%r4,%r4
396	xgr	%r5,%r5
397	xgr	%r6,%r6
398	xgr	%r7,%r7
399	xgr	%r10,%r10
400	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
401	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
402	MBEAR	%r11,%r13
403	stmg	%r8,%r9,__PT_PSW(%r11)
404	lgr	%r2,%r11		# pass pointer to pt_regs
405	brasl	%r14,\handler
406	mvc	__LC_RETURN_PSW(16,%r13),__PT_PSW(%r11)
407	tmhh	%r8,0x0001		# returning to user ?
408	jno	2f
409	STACKLEAK_ERASE
410	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)
411	BPON
412	stpt	__LC_EXIT_TIMER(%r13)
4132:	LBEAR	__PT_LAST_BREAK(%r11)
414	lmg	%r0,%r15,__PT_R0(%r11)
415	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
416SYM_CODE_END(\name)
417.endm
418
419	.section .irqentry.text, "ax"
420
421INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
422INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
423
424	.section .kprobes.text, "ax"
425
426/*
427 * Machine check handler routines
428 */
429SYM_CODE_START(mcck_int_handler)
430	BPOFF
431	GET_LC	%r13
432	lmg	%r8,%r9,__LC_MCK_OLD_PSW(%r13)
433	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_SYSTEM_DAMAGE
434	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
435	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_CR_VALID
436	jno	.Lmcck_panic		# control registers invalid -> panic
437	ptlb
438	lay	%r14,__LC_CPU_TIMER_SAVE_AREA(%r13)
439	mvc	__LC_MCCK_ENTER_TIMER(8,%r13),0(%r14)
440	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_CPU_TIMER_VALID
441	jo	3f
442	la	%r14,__LC_SYS_ENTER_TIMER(%r13)
443	clc	0(8,%r14),__LC_EXIT_TIMER(%r13)
444	jl	1f
445	la	%r14,__LC_EXIT_TIMER(%r13)
4461:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER(%r13)
447	jl	2f
448	la	%r14,__LC_LAST_UPDATE_TIMER(%r13)
4492:	spt	0(%r14)
450	mvc	__LC_MCCK_ENTER_TIMER(8,%r13),0(%r14)
4513:	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_PSW_MWP_VALID
452	jno	.Lmcck_panic
453	tmhh	%r8,0x0001		# interrupting from user ?
454	jnz	.Lmcck_user
455	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_PSW_IA_VALID
456	jno	.Lmcck_panic
457#if IS_ENABLED(CONFIG_KVM)
458	lg	%r10,__LC_CURRENT(%r13)
459	tm	__TI_sie(%r10),0xff
460	jz	.Lmcck_user
461	# Need to compare the address instead of __TI_SIE flag.
462	# Otherwise there would be a race between setting the flag
463	# and entering SIE (or leaving and clearing the flag). This
464	# would cause machine checks targeted at the guest to be
465	# handled by the host.
466	larl	%r14,.Lsie_entry
467	clgrjl	%r9,%r14, 4f
468	larl	%r14,.Lsie_leave
469	clgrjhe	%r9,%r14, 4f
470	lg	%r10,__LC_PCPU
471	oi	__PCPU_FLAGS+7(%r10), _CIF_MCCK_GUEST
4724:	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
473	SIEEXIT __SF_SIE_CONTROL(%r15),%r13
474#endif
475.Lmcck_user:
476	lg	%r15,__LC_MCCK_STACK(%r13)
477	la	%r11,STACK_FRAME_OVERHEAD(%r15)
478	stctg	%c1,%c1,__PT_CR1(%r11)
479	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
480	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
481	lay	%r14,__LC_GPREGS_SAVE_AREA(%r13)
482	mvc	__PT_R0(128,%r11),0(%r14)
483	# clear user controlled registers to prevent speculative use
484	xgr	%r0,%r0
485	xgr	%r1,%r1
486	xgr	%r3,%r3
487	xgr	%r4,%r4
488	xgr	%r5,%r5
489	xgr	%r6,%r6
490	xgr	%r7,%r7
491	xgr	%r10,%r10
492	stmg	%r8,%r9,__PT_PSW(%r11)
493	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
494	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
495	lgr	%r2,%r11		# pass pointer to pt_regs
496	brasl	%r14,s390_do_machine_check
497	lctlg	%c1,%c1,__PT_CR1(%r11)
498	lmg	%r0,%r10,__PT_R0(%r11)
499	mvc	__LC_RETURN_MCCK_PSW(16,%r13),__PT_PSW(%r11) # move return PSW
500	tm	__LC_RETURN_MCCK_PSW+1(%r13),0x01 # returning to user ?
501	jno	0f
502	BPON
503	stpt	__LC_EXIT_TIMER(%r13)
5040:	ALTERNATIVE "brcl 0,0", __stringify(lay %r12,__LC_LAST_BREAK_SAVE_AREA(%r13)),\
505		ALT_FACILITY(193)
506	LBEAR	0(%r12)
507	lmg	%r11,%r15,__PT_R11(%r11)
508	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE
509
510.Lmcck_panic:
511	/*
512	 * Iterate over all possible CPU addresses in the range 0..0xffff
513	 * and stop each CPU using signal processor. Use compare and swap
514	 * to allow just one CPU-stopper and prevent concurrent CPUs from
515	 * stopping each other while leaving the others running.
516	 */
517	lhi	%r5,0
518	lhi	%r6,1
519	larl	%r7,stop_lock
520	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
521	jnz	4f
522	larl	%r7,this_cpu
523	stap	0(%r7)			# this CPU address
524	lh	%r4,0(%r7)
525	nilh	%r4,0
526	lhi	%r0,1
527	sll	%r0,16			# CPU counter
528	lhi	%r3,0			# next CPU address
5290:	cr	%r3,%r4
530	je	2f
5311:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
532	brc	SIGP_CC_BUSY,1b
5332:	ahi	%r3,1
534	brct	%r0,0b
5353:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
536	brc	SIGP_CC_BUSY,3b
5374:	j	4b
538SYM_CODE_END(mcck_int_handler)
539
/*
 * Restart interrupt handler: turn DAT on, switch to the restart stack,
 * build a pt_regs from the interrupted context, wait until the
 * requesting (source) CPU has stopped, call the function from
 * __LC_RESTART_FN with __LC_RESTART_DATA and the source CPU address,
 * then stop this CPU.
 */
SYM_CODE_START(restart_int_handler)
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", ALT_FACILITY(40)	# load program parameter
	stg	%r15,__LC_SAVE_AREA_RESTART
	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
	jz	0f
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA	# restore saved control registers
0:	larl	%r15,daton_psw
	lpswe	0(%r15)				# turn dat on, keep irqs off
.Ldaton:
	GET_LC	%r15
	lg	%r15,__LC_RESTART_STACK(%r15)	# switch to restart stack
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	GET_LC	%r13
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART(%r13)
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW(%r13)
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN(%r13)	# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA(%r13)
	lgf	%r3,__LC_RESTART_SOURCE(%r13)
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b				# should never be reached
SYM_CODE_END(restart_int_handler)
571
	__INIT
/*
 * Early (boot-time) program check handler: minimal pt_regs setup on the
 * current stack, then __do_early_pgm_check(regs) and return via LPSWEY.
 * Discarded after init (__INIT section).
 */
SYM_CODE_START(early_pgm_check_handler)
	STMG_LC %r8,%r15,__LC_SAVE_AREA		# save gprs 8-15 in lowcore
	GET_LC	%r13
	aghi	%r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# %r11 = pt_regs
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_PSW(16,%r11),__LC_PGM_OLD_PSW(%r13)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,__do_early_pgm_check
	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(early_pgm_check_handler)
	__FINIT
589
	.section .kprobes.text, "ax"

/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 *
 * Entered from CHECK_VMAP_STACK with %r14 pointing at the register save
 * area (gprs 8-15), %r8/%r9 holding the old PSW and %r10 the last break.
 */
SYM_CODE_START(stack_overflow)
	GET_LC	%r15
	lg	%r15,__LC_NODAT_STACK(%r15) # change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# %r11 = pt_regs
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)	# gprs 8-15 from save area
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow	# tail-call, never returns
SYM_CODE_END(stack_overflow)
609
	.section .data, "aw"
	.balign	4
/* CPU-stopper lock and CPU address used by mcck_int_handler's panic path */
SYM_DATA_LOCAL(stop_lock,	.long 0)
SYM_DATA_LOCAL(this_cpu,	.short 0)
	.balign	8
/* PSW used by restart_int_handler to enable DAT and continue at .Ldaton */
SYM_DATA_START_LOCAL(daton_psw)
	.quad	PSW_KERNEL_BITS
	.quad	.Ldaton
SYM_DATA_END(daton_psw)

	.section .rodata, "a"
	.balign	8
/* 64-bit system call table: one entry per syscall from syscall_table.h */
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
SYM_DATA_START(sys_call_table)
#include "asm/syscall_table.h"
SYM_DATA_END(sys_call_table)
#undef SYSCALL

#ifdef CONFIG_COMPAT

/* 31-bit compat system call table built from the same list */
#define SYSCALL(esame,emu)	.quad __s390_ ## emu
SYM_DATA_START(sys_call_table_emu)
#include "asm/syscall_table.h"
SYM_DATA_END(sys_call_table_emu)
#undef SYSCALL
#endif
636