xref: /linux/arch/s390/kernel/entry.S (revision 9dbbc3b9d09d6deba9f3b9e1d5b355032ed46a75)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *    S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/ctl_reg.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>

__PT_R0      =	__PT_GPRS
__PT_R1      =	__PT_GPRS + 8
__PT_R2      =	__PT_GPRS + 16
__PT_R3      =	__PT_GPRS + 24
__PT_R4      =	__PT_GPRS + 32
__PT_R5      =	__PT_GPRS + 40
__PT_R6      =	__PT_GPRS + 48
__PT_R7      =	__PT_GPRS + 56
__PT_R8      =	__PT_GPRS + 64
__PT_R9      =	__PT_GPRS + 72
__PT_R10     =	__PT_GPRS + 80
__PT_R11     =	__PT_GPRS + 88
__PT_R12     =	__PT_GPRS + 96
__PT_R13     =	__PT_GPRS + 104
__PT_R14     =	__PT_GPRS + 112
__PT_R15     =	__PT_GPRS + 120

STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

_LPP_OFFSET	= __LC_LPP

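	/*
	 * CHECK_STACK: with CONFIG_CHECK_STACK enabled, branch to
	 * stack_overflow if %r15 has run into the stack guard area; the
	 * lowcore save area address is passed along in %r14. Without
	 * CONFIG_CHECK_STACK this is a no-op.
	 */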
	.macro	CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

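	/*
	 * CHECK_VMAP_STACK: with CONFIG_VMAP_STACK enabled, derive the
	 * STACK_INIT (top of stack) value for the stack %r15 currently
	 * points into and compare it against the known kernel, async, mcck,
	 * nodat and restart stacks. Branch to \oklabel on a match, otherwise
	 * to stack_overflow with the lowcore save area address in %r14.
	 * Without CONFIG_VMAP_STACK this is an unconditional branch to
	 * \oklabel.
	 */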
	.macro	CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - STACK_SIZE
	oill	%r14,STACK_INIT
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_MCCK_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm

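	/*
	 * STCK: store the TOD clock to \savearea. The 0xb205 opcode is the
	 * basic STCK instruction; when facility 25 (store-clock-fast) is
	 * installed, alternatives patching replaces it with STCKF (0xb27c).
	 */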
	.macro STCK savearea
	ALTERNATIVE ".insn	s,0xb2050000,\savearea", \
		    ".insn	s,0xb27c0000,\savearea", 25
	.endm

	/*
	 * The TSTMSK macro generates a test-under-mask instruction for the
	 * byte of a memory field that contains the specified mask value.
	 * The mask value can be any constant; the macro shifts the mask to
	 * find the byte offset within the field to test.
	 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
		.if (\bytepos < \size) && (\mask >> 8)
			.if (\mask & 0xff)
				.error "Mask exceeds byte boundary"
			.endif
			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
			.exitm
		.endif
		.ifeq \mask
			.error "Mask must not be zero"
		.endif
		off = \size - \bytepos - 1
		tm	off+\addr, \mask
	.endm
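	/*
	 * For example, with the default 8 byte field size,
	 * TSTMSK __LC_CPU_FLAGS,0x01 expands to tm 7+__LC_CPU_FLAGS,0x01,
	 * and TSTMSK __LC_CPU_FLAGS,0x0100 to tm 6+__LC_CPU_FLAGS,0x01.
	 */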

	.macro BPOFF
	ALTERNATIVE "", ".long 0xb2e8c000", 82
	.endm

	.macro BPON
	ALTERNATIVE "", ".long 0xb2e8d000", 82
	.endm

	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
		    "", 82
	.endm

	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8;  .long 0xb2e8c000", \
		    "jnz .+8; .long 0xb2e8d000", 82
	.endm

#if IS_ENABLED(CONFIG_KVM)
	/*
	 * The OUTSIDE macro jumps to the provided label in case the value
	 * in the provided register is outside of the provided range. The
	 * macro is useful for checking whether a PSW stored in a register
	 * pair points inside or outside of a block of instructions.
	 * @reg: register to check
	 * @start: start of the range
	 * @end: end of the range
	 * @outside_label: jump here if @reg is outside of [@start..@end)
	 */
	.macro OUTSIDE reg,start,end,outside_label
	lgr	%r14,\reg
	larl	%r13,\start
	slgr	%r14,%r13
	lghi	%r13,\end - \start
	clgr	%r14,%r13
	jhe	\outside_label
	.endm
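	/*
	 * Example (as used below):
	 *   OUTSIDE %r9,.Lsie_gmap,.Lsie_done,1f
	 * branches to 1f unless %r9 points into [.Lsie_gmap, .Lsie_done).
	 * Note that %r13 and %r14 are clobbered.
	 */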
#endif

	GEN_BR_THUNK %r14
	GEN_BR_THUNK %r14,%r13

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * This nop exists only so that __bpon does not start at the very
	 * beginning of the kprobes text section. Otherwise several symbols
	 * would share the same address, and objdump, for example, would pick
	 * an arbitrary one of them when disassembling this code. With the
	 * nop added in between, the __bpon symbol is unique again.
	 */
	nop	0

ENTRY(__bpon)
	.globl __bpon
	BPON
	BR_EX	%r14
ENDPROC(__bpon)

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
	BR_EX	%r14
ENDPROC(__switch_to)

#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
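/*
 * On return, %r2 holds the exit reason code taken from __SF_SIE_REASON
 * (set to -EFAULT by .Lsie_fault if a program check hits the SIE exit
 * landing pad).
 */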
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL(%r15)	# save control block pointer
	stg	%r3,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
	sie	0(%r14)
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
.Lsie_done:
# Some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in the case of SIE.
# There are some corner cases (e.g. runtime instrumentation) where the ILC is
# unpredictable. Other instructions between sie64a and .Lsie_done should not
# cause program interrupts, so let's use 3 nops as a landing pad for all
# possible rewinds.
# See also .Lcleanup_sie
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(sie64a)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
 */
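/*
 * %r14 is used as the "per trap" indicator passed to __do_syscall in %r3:
 * it is zero for a regular svc and set to 1 when the handler is entered
 * via .Lpgm_svcper for a single-stepped system call.
 */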

ENTRY(system_call)
	stpt	__LC_SYS_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lghi	%r14,0
.Lsysc_per:
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r12,__LC_CURRENT
	lg	%r15,__LC_KERNEL_STACK
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r8,%r8
	xgr	%r9,%r9
	xgr	%r10,%r10
	xgr	%r11,%r11
	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
	lgr	%r3,%r14
	brasl	%r14,__do_syscall
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	b	__LC_RETURN_LPSWE
ENDPROC(system_call)

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	lgr	%r3,%r11
	brasl	%r14,__ret_from_fork
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	b	__LC_RETURN_LPSWE
ENDPROC(ret_from_fork)

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
	stpt	__LC_SYS_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r12,__LC_CURRENT
	lghi	%r10,0
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# coming from user space?
	jno	.Lpgm_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	j	3f			# -> fault in user space
.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for program checks in sie64a
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,1f
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	lghi	%r10,_PIF_GUEST_FAULT
#endif
1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	2f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
2:	CHECK_STACK __LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 4f
	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
3:	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	lg	%r15,__LC_KERNEL_STACK
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stg	%r10,__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	stmg	%r8,%r9,__PT_PSW(%r11)

	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	lgr	%r2,%r11
	brasl	%r14,__do_pgm_check
	tmhh	%r8,0x0001		# returning to user space?
	jno	.Lpgm_exit_kernel
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
.Lpgm_exit_kernel:
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	b	__LC_RETURN_LPSWE

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,1
	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per
ENDPROC(pgm_check_handler)

/*
 * Interrupt handler macro used for external and IO interrupts.
 */
.macro INT_HANDLER name,lc_old_psw,handler
ENTRY(\name)
	STCK	__LC_INT_CLOCK
	stpt	__LC_SYS_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,\lc_old_psw
	tmhh	%r8,0x0001			# interrupting from user ?
	jnz	1f
#if IS_ENABLED(CONFIG_KVM)
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,0f
	brasl	%r14,.Lcleanup_sie
#endif
0:	CHECK_STACK __LC_SAVE_AREA_ASYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r15,__LC_KERNEL_STACK
2:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	tm	%r8,0x0001		# coming from user space?
	jno	1f
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
1:	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,\handler
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tmhh	%r8,0x0001		# returning to user ?
	jno	2f
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
2:	lmg	%r0,%r15,__PT_R0(%r11)
	b	__LC_RETURN_LPSWE
ENDPROC(\name)
.endm

INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq

/*
 * Load idle PSW.
 */
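/*
 * %r2 - pointer to the per-cpu idle data (the idle enter clock and timer
 *	 values are stored at the __CLOCK_IDLE_ENTER/__TIMER_IDLE_ENTER
 *	 offsets)
 * %r3 - PSW mask for the enabled wait; the second half of the idle PSW is
 *	 filled with the address of psw_idle_exit below
 */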
ENTRY(psw_idle)
	stg	%r14,(__SF_GPRS+8*8)(%r15)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,psw_idle_exit
	stg	%r1,__SF_EMPTY+8(%r15)
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm
	.insn	rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
.Lpsw_idle_stcctm:
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	STCK	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
	lpswe	__SF_EMPTY(%r15)
.globl psw_idle_exit
psw_idle_exit:
	BR_EX	%r14
ENDPROC(psw_idle)

/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
	STCK	__LC_MCCK_CLOCK
	BPOFF
	la	%r1,4095		# validate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
	sckc	__LC_CLOCK_COMPARATOR			# validate comparator
	lam	%a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	la	%r14,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
	ptlb
	lg	%r11,__LC_MCESAD-4095(%r14) # extended machine check save area
	nill	%r11,0xfc00		# MCESA_ORIGIN_MASK
	TSTMSK	__LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
	jno	0f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_GS_VALID
	jno	0f
	.insn	 rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
0:	l	%r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_FC_VALID
	jo	0f
	sr	%r14,%r14
0:	sfpc	%r14
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jo	0f
	lghi	%r14,__LC_FPREGS_SAVE_AREA
	ld	%f0,0(%r14)
	ld	%f1,8(%r14)
	ld	%f2,16(%r14)
	ld	%f3,24(%r14)
	ld	%f4,32(%r14)
	ld	%f5,40(%r14)
	ld	%f6,48(%r14)
	ld	%f7,56(%r14)
	ld	%f8,64(%r14)
	ld	%f9,72(%r14)
	ld	%f10,80(%r14)
	ld	%f11,88(%r14)
	ld	%f12,96(%r14)
	ld	%f13,104(%r14)
	ld	%f14,112(%r14)
	ld	%f15,120(%r14)
	j	1f
0:	VLM	%v0,%v15,0,%r11
	VLM	%v16,%v31,256,%r11
1:	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	la	%r14,__LC_SYS_ENTER_TIMER
	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	4f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
4:	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
	tmhh	%r8,0x0001			# interrupting from user ?
	jnz	.Lmcck_user
#if IS_ENABLED(CONFIG_KVM)
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,.Lmcck_stack
	OUTSIDE	%r9,.Lsie_entry,.Lsie_skip,5f
	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
5:	brasl	%r14,.Lcleanup_sie
#endif
	j	.Lmcck_stack
.Lmcck_user:
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
.Lmcck_stack:
	lg	%r15,__LC_MCCK_STACK
.Lmcck_skip:
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stctg	%c1,%c1,__PT_CR1(%r11)
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	cghi	%r2,0
	je	.Lmcck_return
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	brasl	%r14,s390_handle_mcck
.Lmcck_return:
	lctlg	%c1,%c1,__PT_CR1(%r11)
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
0:	lmg	%r11,%r15,__PT_R11(%r11)
	b	__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	lg	%r15,__LC_NODAT_STACK
	j	.Lmcck_skip
ENDPROC(mcck_int_handler)

#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
	stg	%r15,__LC_SAVE_AREA_RESTART
	lg	%r15,__LC_RESTART_STACK
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lg	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
ENDPROC(restart_int_handler)

	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
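/*
 * On entry %r14 holds the address of the lowcore save area passed in by
 * CHECK_STACK / CHECK_VMAP_STACK; the saved %r8-%r15 are copied from there
 * into the pt_regs.
 */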
ENTRY(stack_overflow)
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
ENDPROC(stack_overflow)
#endif

#if IS_ENABLED(CONFIG_KVM)
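/*
 * .Lcleanup_sie is called via brasl from the interrupt and machine check
 * handlers when the old PSW points into the SIE critical section: it marks
 * the SIE control block as no longer in SIE, restores the kernel ASCE and
 * redirects the interrupt return address in %r9 to sie_exit.
 */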
.Lcleanup_sie:
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	larl	%r9,sie_exit			# skip forward to sie_exit
	BR_EX	%r14,%r13
#endif
	.section .rodata, "a"
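/*
 * asm/syscall_table.h contains one SYSCALL() entry per system call number.
 * The SYSCALL macro is redefined around each include so that the same table
 * generates the 64-bit entry points here and, with CONFIG_COMPAT, the compat
 * entry points for sys_call_table_emu below.
 */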
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
	.globl	sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.quad __s390_ ## emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif