xref: /linux/arch/s390/kernel/entry.S (revision e5e1bdf0bca8cd16ad39ed2febf6f689d9c07586)
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 *    S390 low-level entry points.
4 *
5 *    Copyright IBM Corp. 1999, 2012
6 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 *		 Hartmut Penner (hp@de.ibm.com),
8 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
9 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
10 */
11
12#include <linux/init.h>
13#include <linux/linkage.h>
14#include <asm/alternative-asm.h>
15#include <asm/processor.h>
16#include <asm/cache.h>
17#include <asm/ctl_reg.h>
18#include <asm/dwarf.h>
19#include <asm/errno.h>
20#include <asm/ptrace.h>
21#include <asm/thread_info.h>
22#include <asm/asm-offsets.h>
23#include <asm/unistd.h>
24#include <asm/page.h>
25#include <asm/sigp.h>
26#include <asm/irq.h>
27#include <asm/vx-insn.h>
28#include <asm/setup.h>
29#include <asm/nmi.h>
30#include <asm/export.h>
31#include <asm/nospec-insn.h>
32
# Byte offsets of the individual saved general purpose registers within
# the pt_regs GPR save area (16 registers, 8 bytes each).
33__PT_R0      =	__PT_GPRS
34__PT_R1      =	__PT_GPRS + 8
35__PT_R2      =	__PT_GPRS + 16
36__PT_R3      =	__PT_GPRS + 24
37__PT_R4      =	__PT_GPRS + 32
38__PT_R5      =	__PT_GPRS + 40
39__PT_R6      =	__PT_GPRS + 48
40__PT_R7      =	__PT_GPRS + 56
41__PT_R8      =	__PT_GPRS + 64
42__PT_R9      =	__PT_GPRS + 72
43__PT_R10     =	__PT_GPRS + 80
44__PT_R11     =	__PT_GPRS + 88
45__PT_R12     =	__PT_GPRS + 96
46__PT_R13     =	__PT_GPRS + 104
47__PT_R14     =	__PT_GPRS + 112
48__PT_R15     =	__PT_GPRS + 120
49
# Kernel stack geometry: STACK_INIT is the offset of the initial stack
# pointer within a stack allocation, leaving room for one register save
# area (STACK_FRAME_OVERHEAD) plus a pt_regs at the top of the stack.
50STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
51STACK_SIZE  = 1 << STACK_SHIFT
52STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
53
# Lowcore offset of the LPP save field (used by the "set program
# parameter" ALTERNATIVEs below, patched in for facility 40).
54_LPP_OFFSET	= __LC_LPP
55
	/*
	 * Branch to stack_overflow if %r15 has run into the stack guard
	 * area at the low end of the current stack (only if
	 * CONFIG_CHECK_STACK is enabled, otherwise a no-op).
	 * \savearea is loaded into %r14 so stack_overflow can recover
	 * the registers saved there.
	 */
56	.macro	CHECK_STACK savearea
57#ifdef CONFIG_CHECK_STACK
58	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
59	lghi	%r14,\savearea
60	jz	stack_overflow
61#endif
62	.endm
63
	/*
	 * With CONFIG_VMAP_STACK: round %r15 down/up to the expected
	 * initial stack pointer of its stack allocation and compare it
	 * against every known per-cpu stack.  Branch to \oklabel on a
	 * match, otherwise fall through to stack_overflow with %r14 set
	 * to \savearea.  Without CONFIG_VMAP_STACK this is an
	 * unconditional branch to \oklabel.  Clobbers %r14.
	 */
64	.macro	CHECK_VMAP_STACK savearea,oklabel
65#ifdef CONFIG_VMAP_STACK
66	lgr	%r14,%r15
67	nill	%r14,0x10000 - STACK_SIZE
68	oill	%r14,STACK_INIT
69	clg	%r14,__LC_KERNEL_STACK
70	je	\oklabel
71	clg	%r14,__LC_ASYNC_STACK
72	je	\oklabel
73	clg	%r14,__LC_MCCK_STACK
74	je	\oklabel
75	clg	%r14,__LC_NODAT_STACK
76	je	\oklabel
77	clg	%r14,__LC_RESTART_STACK
78	je	\oklabel
79	lghi	%r14,\savearea
80	j	stack_overflow
81#else
82	j	\oklabel
83#endif
84	.endm
85
	/*
	 * Store the TOD clock into \savearea.  Base version is STCK
	 * (opcode 0xb205); when facility 25 (store-clock-fast) is
	 * installed the ALTERNATIVE patches in STCKF (opcode 0xb27c).
	 */
86	.macro STCK savearea
87	ALTERNATIVE ".insn	s,0xb2050000,\savearea", \
88		    ".insn	s,0xb27c0000,\savearea", 25
89	.endm
90
91	/*
92	 * The TSTMSK macro generates a test-under-mask instruction by
93	 * calculating the memory offset for the specified mask value.
94	 * Mask value can be any constant.  The macro shifts the mask
95	 * value to calculate the memory offset for the test-under-mask
96	 * instruction.
	 *
	 * It recurses byte-wise: as long as the mask has bits above the
	 * low byte, shift right by 8 and step to the next byte position.
	 * A mask spanning more than one byte is rejected at assembly
	 * time, as is a zero mask.
97	 */
98	.macro TSTMSK addr, mask, size=8, bytepos=0
99		.if (\bytepos < \size) && (\mask >> 8)
100			.if (\mask & 0xff)
101				.error "Mask exceeds byte boundary"
102			.endif
103			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
104			.exitm
105		.endif
106		.ifeq \mask
107			.error "Mask must not be zero"
108		.endif
		# big-endian: byte 0 of the mask is the last byte in memory
109		off = \size - \bytepos - 1
110		tm	off+\addr, \mask
111	.endm
112
	/*
	 * Branch prediction control for Spectre mitigation.  The 0xb2e8
	 * opcodes are branch-prediction assist instructions: ...c000
	 * disables and ...d000 re-enables prediction.  All four macros
	 * are no-ops unless alternative facility 82 is available.
	 */
113	.macro BPOFF
114	ALTERNATIVE "", ".long 0xb2e8c000", 82
115	.endm
116
117	.macro BPON
118	ALTERNATIVE "", ".long 0xb2e8d000", 82
119	.endm
120
	/*
	 * Re-enable branch prediction only if none of \tif_mask bits is
	 * set in the flags field at \tif_ptr (i.e. the task is not
	 * isolated from branch prediction).
	 */
121	.macro BPENTER tif_ptr,tif_mask
122	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
123		    "", 82
124	.endm
125
	/*
	 * On the way out to user space/guest: disable prediction if the
	 * task is isolated, enable it otherwise.
	 */
126	.macro BPEXIT tif_ptr,tif_mask
127	TSTMSK	\tif_ptr,\tif_mask
128	ALTERNATIVE "jz .+8;  .long 0xb2e8c000", \
129		    "jnz .+8; .long 0xb2e8d000", 82
130	.endm
131
132#if IS_ENABLED(CONFIG_KVM)
133	/*
134	 * The OUTSIDE macro jumps to the provided label in case the value
135	 * in the provided register is outside of the provided range. The
136	 * macro is useful for checking whether a PSW stored in a register
137	 * pair points inside or outside of a block of instructions.
138	 * @reg: register to check
139	 * @start: start of the range
140	 * @end: end of the range
141	 * @outside_label: jump here if @reg is outside of [@start..@end)
	 *
	 * Note: clobbers %r13 and %r14.  The unsigned compare (jhe) also
	 * catches addresses below @start, since the subtraction wraps.
142	 */
143	.macro OUTSIDE reg,start,end,outside_label
144	lgr	%r14,\reg
145	larl	%r13,\start
146	slgr	%r14,%r13
147	lghi	%r13,\end - \start
148	clgr	%r14,%r13
149	jhe	\outside_label
150	.endm
151#endif
152
	# Emit expoline (retpoline-style) thunks for the indirect
	# branches used below via BR_EX (Spectre v2 mitigation, see
	# asm/nospec-insn.h).
153	GEN_BR_THUNK %r14
154	GEN_BR_THUNK %r14,%r13
155
156	.section .kprobes.text, "ax"
157.Ldummy:
158	/*
159	 * This nop exists only in order to avoid that __bpon starts at
160	 * the beginning of the kprobes text section. In that case we would
161	 * have several symbols at the same address. E.g. objdump would take
162	 * an arbitrary symbol name when disassembling this code.
163	 * With the added nop in between the __bpon symbol is unique
164	 * again.
165	 */
166	nop	0
167
# Re-enable branch prediction on the calling CPU and return.
168ENTRY(__bpon)
	# NOTE(review): the .globl looks redundant — ENTRY() already
	# declares the symbol global; harmless, but could be dropped.
169	.globl __bpon
170	BPON
171	BR_EX	%r14
172ENDPROC(__bpon)
173
174/*
175 * Scheduler resume function, called by switch_to
176 *  gpr2 = (task_struct *) prev
177 *  gpr3 = (task_struct *) next
178 * Returns:
179 *  gpr2 = prev
180 */
181ENTRY(__switch_to)
182	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
183	lghi	%r4,__TASK_stack
184	lghi	%r1,__TASK_thread
185	llill	%r5,STACK_INIT			# offset of initial stack pointer
186	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
187	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
188	agr	%r15,%r5			# end of kernel stack of next
189	stg	%r3,__LC_CURRENT		# store task struct of next
190	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
191	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
192	aghi	%r3,__TASK_pid
193	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next (%r0 = no base)
194	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	# facility 40: store LPP so hardware accounting tracks the new task
195	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
196	BR_EX	%r14
197ENDPROC(__switch_to)
198
199#if IS_ENABLED(CONFIG_KVM)
200/*
201 * sie64a calling convention:
202 * %r2 pointer to sie control block
203 * %r3 guest register save area
 *
 * Returns:
 * %r2 exit reason code (0, or -EFAULT if a fault hit the SIE
 *     critical section, see the EX_TABLE entries below)
204 */
205ENTRY(sie64a)
206	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
207	lg	%r12,__LC_CURRENT
208	stg	%r2,__SF_SIE_CONTROL(%r15)	# save control block pointer
209	stg	%r3,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
210	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
211	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
212	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
213	lg	%r14,__LC_GMAP			# get gmap pointer
214	ltgr	%r14,%r14
215	jz	.Lsie_gmap			# no gmap -> keep current asce
216	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
217.Lsie_gmap:
218	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
219	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
220	tm	__SIE_PROG20+3(%r14),3		# last exit...
221	jnz	.Lsie_skip
222	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
223	jo	.Lsie_skip			# exit if fp/vx regs changed
	# allow guest branch prediction only if neither host nor guest
	# isolation flags are set
224	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
225.Lsie_entry:
226	sie	0(%r14)
227	BPOFF
228	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
229.Lsie_skip:
230	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
231	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
232.Lsie_done:
233# Some program checks are suppressing, i.e. the PSW already points past
234# the instruction. C code (e.g. do_protection_exception) will rewind the
234# PSW by the ILC, which is often 4 bytes in case of SIE. There
235# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
236# Other instructions between sie64a and .Lsie_done should not cause program
237# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
238# See also .Lcleanup_sie
239.Lrewind_pad6:
240	nopr	7
241.Lrewind_pad4:
242	nopr	7
243.Lrewind_pad2:
244	nopr	7
245	.globl sie_exit
246sie_exit:
247	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
248	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
249	xgr	%r0,%r0				# clear guest registers to
250	xgr	%r1,%r1				# prevent speculative use
251	xgr	%r3,%r3
252	xgr	%r4,%r4
253	xgr	%r5,%r5
254	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
255	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
256	BR_EX	%r14
257.Lsie_fault:
258	lghi	%r14,-EFAULT
259	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
260	j	sie_exit
261
	# faults on the rewind pad / sie_exit are handled as -EFAULT
262	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
263	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
264	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
265	EX_TABLE(sie_exit,.Lsie_fault)
266ENDPROC(sie64a)
267EXPORT_SYMBOL(sie64a)
268EXPORT_SYMBOL(sie_exit)
269#endif
270
271/*
272 * SVC interrupt handler routine. System calls are synchronous events and
273 * are entered with interrupts disabled.
 *
 * On entry the user registers are in __LC_SAVE_AREA_SYNC (%r8-%r15)
 * and in %r0-%r7.  A pt_regs is built on the kernel stack and
 * __do_syscall() is called with:
 *  %r2 = pointer to pt_regs
 *  %r3 = per-trap flag: 0 on a normal svc, 1 when entered via
 *        .Lpgm_svcper (single-stepped svc, see pgm_check_handler)
274 */
275
276ENTRY(system_call)
277	stpt	__LC_SYS_ENTER_TIMER
278	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
279	BPOFF
280	lghi	%r14,0			# %r14 = 0: not a per trap
281.Lsysc_per:
282	lctlg	%c1,%c1,__LC_KERNEL_ASCE
283	lg	%r12,__LC_CURRENT
284	lg	%r15,__LC_KERNEL_STACK
285	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
286	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
287	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
288	# clear user controlled register to prevent speculative use
289	xgr	%r0,%r0
290	xgr	%r1,%r1
291	xgr	%r4,%r4
292	xgr	%r5,%r5
293	xgr	%r6,%r6
294	xgr	%r7,%r7
295	xgr	%r8,%r8
296	xgr	%r9,%r9
297	xgr	%r10,%r10
298	xgr	%r11,%r11
299	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
300	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
301	lgr	%r3,%r14		# second arg: per-trap flag
302	brasl	%r14,__do_syscall
303	lctlg	%c1,%c1,__LC_USER_ASCE
304	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
305	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
306	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
307	stpt	__LC_EXIT_TIMER
308	b	__LC_RETURN_LPSWE
309ENDPROC(system_call)
310
311#
312# a new process exits the kernel with ret_from_fork
313#
# Expects %r11 to carry the previous task (forwarded to
# __ret_from_fork as second argument) and pt_regs on the kernel
# stack; the exit path mirrors the tail of system_call.
314ENTRY(ret_from_fork)
315	lgr	%r3,%r11
316	brasl	%r14,__ret_from_fork
317	lctlg	%c1,%c1,__LC_USER_ASCE
318	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
319	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
320	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
321	stpt	__LC_EXIT_TIMER
322	b	__LC_RETURN_LPSWE
323ENDPROC(ret_from_fork)
324
325/*
326 * Program check handler routine
 *
 * %r8/%r9 hold the program old PSW, %r10 collects the pt_regs flags
 * (0, or _PIF_GUEST_FAULT when the fault hit the SIE critical
 * section), %r11 points to the pt_regs passed to __do_pgm_check().
327 */
328
329ENTRY(pgm_check_handler)
330	stpt	__LC_SYS_ENTER_TIMER
331	BPOFF
332	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
333	lg	%r12,__LC_CURRENT
334	lghi	%r10,0			# default pt_regs flags
335	lmg	%r8,%r9,__LC_PGM_OLD_PSW
336	tmhh	%r8,0x0001		# coming from user space?
337	jno	.Lpgm_skip_asce
338	lctlg	%c1,%c1,__LC_KERNEL_ASCE
339	j	3f			# -> fault in user space
340.Lpgm_skip_asce:
341#if IS_ENABLED(CONFIG_KVM)
342	# cleanup critical section for program checks in sie64a
343	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,1f
344	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
345	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
346	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
347	larl	%r9,sie_exit			# skip forward to sie_exit
348	lghi	%r10,_PIF_GUEST_FAULT
349#endif
3501:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
351	jnz	2f			# -> enabled, can't be a double fault
352	tm	__LC_PGM_ILC+3,0x80	# check for per exception
353	jnz	.Lpgm_svcper		# -> single stepped svc
3542:	CHECK_STACK __LC_SAVE_AREA_SYNC
355	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
356	# CHECK_VMAP_STACK branches to stack_overflow or 4f
357	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
3583:	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
359	lg	%r15,__LC_KERNEL_STACK
3604:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
361	stg	%r10,__PT_FLAGS(%r11)
362	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
363	stmg	%r0,%r7,__PT_R0(%r11)
364	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
365	stmg	%r8,%r9,__PT_PSW(%r11)
366
367	# clear user controlled registers to prevent speculative use
368	xgr	%r0,%r0
369	xgr	%r1,%r1
370	xgr	%r3,%r3
371	xgr	%r4,%r4
372	xgr	%r5,%r5
373	xgr	%r6,%r6
374	xgr	%r7,%r7
375	lgr	%r2,%r11		# pass pointer to pt_regs
376	brasl	%r14,__do_pgm_check
377	tmhh	%r8,0x0001		# returning to user space?
378	jno	.Lpgm_exit_kernel
379	lctlg	%c1,%c1,__LC_USER_ASCE
380	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
381	stpt	__LC_EXIT_TIMER
382.Lpgm_exit_kernel:
383	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
384	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
385	b	__LC_RETURN_LPSWE
386
387#
388# single stepped system call
389#
# Redirect to .Lsysc_per with %r14 = 1 so the syscall path records
# the pending PER trap (see system_call above).
390.Lpgm_svcper:
391	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
392	larl	%r14,.Lsysc_per
393	stg	%r14,__LC_RETURN_PSW+8
394	lghi	%r14,1
395	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per
396ENDPROC(pgm_check_handler)
397
398/*
399 * Interrupt handler macro used for external and IO interrupts.
 *
 * Generates a first-level handler \name that:
 *  - records the interrupt clock and CPU timer,
 *  - saves %r8-%r15 to __LC_SAVE_AREA_ASYNC,
 *  - if interrupting the kernel, cleans up a possible SIE critical
 *    section and switches to/stays on a kernel stack,
 *  - if interrupting user space, switches to the kernel stack and
 *    kernel ASCE,
 *  - builds a pt_regs, clears user-controlled registers and calls
 *    \handler with %r2 = pointer to pt_regs,
 *  - restores registers and returns via __LC_RETURN_LPSWE, loading
 *    the user ASCE first when returning to user space.
400 */
401.macro INT_HANDLER name,lc_old_psw,handler
402ENTRY(\name)
403	STCK	__LC_INT_CLOCK
404	stpt	__LC_SYS_ENTER_TIMER
405	BPOFF
406	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
407	lg	%r12,__LC_CURRENT
408	lmg	%r8,%r9,\lc_old_psw
409	tmhh	%r8,0x0001			# interrupting from user ?
410	jnz	1f
411#if IS_ENABLED(CONFIG_KVM)
412	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,0f
413	brasl	%r14,.Lcleanup_sie
414#endif
4150:	CHECK_STACK __LC_SAVE_AREA_ASYNC
416	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
417	j	2f
4181:	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
419	lctlg	%c1,%c1,__LC_KERNEL_ASCE
420	lg	%r15,__LC_KERNEL_STACK
4212:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
422	la	%r11,STACK_FRAME_OVERHEAD(%r15)
423	stmg	%r0,%r7,__PT_R0(%r11)
424	# clear user controlled registers to prevent speculative use
425	xgr	%r0,%r0
426	xgr	%r1,%r1
427	xgr	%r3,%r3
428	xgr	%r4,%r4
429	xgr	%r5,%r5
430	xgr	%r6,%r6
431	xgr	%r7,%r7
432	xgr	%r10,%r10
433	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
434	stmg	%r8,%r9,__PT_PSW(%r11)
	# Fix: test the problem-state bit of the old PSW held in %r8.
	# The PSW problem-state bit is bit 15, i.e. in the high-high
	# halfword of the register, so tmhh must be used here (as at
	# every other user-space check in this file); tm is an SI-format
	# storage instruction and cannot test a register.
435	tmhh	%r8,0x0001		# coming from user space?
436	jno	1f
437	lctlg	%c1,%c1,__LC_KERNEL_ASCE
4381:	lgr	%r2,%r11		# pass pointer to pt_regs
439	brasl	%r14,\handler
440	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
441	tmhh	%r8,0x0001		# returning to user ?
442	jno	2f
443	lctlg	%c1,%c1,__LC_USER_ASCE
444	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
445	stpt	__LC_EXIT_TIMER
4462:	lmg	%r0,%r15,__PT_R0(%r11)
447	b	__LC_RETURN_LPSWE
448ENDPROC(\name)
449.endm
450
451INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
452INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
453
454/*
455 * Load idle PSW.
 *
 * %r2 = pointer to the per-cpu idle data area (clock/timer enter
 * fields), %r3 = idle PSW mask.  The continuation address
 * psw_idle_exit is stored as the PSW address, so the CPU resumes
 * there after the enabled wait ends.
456 */
457ENTRY(psw_idle)
458	stg	%r14,(__SF_GPRS+8*8)(%r15)
459	stg	%r3,__SF_EMPTY(%r15)		# build idle PSW on the stack
460	larl	%r1,psw_idle_exit
461	stg	%r1,__SF_EMPTY+8(%r15)
462	larl	%r1,smp_cpu_mtid
463	llgf	%r1,0(%r1)
464	ltgr	%r1,%r1
465	jz	.Lpsw_idle_stcctm		# no multithreading -> skip
	# store CPU counter set (STCCTM) into the MT cycles enter area
466	.insn	rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
467.Lpsw_idle_stcctm:
468	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
469	BPON
470	STCK	__CLOCK_IDLE_ENTER(%r2)
471	stpt	__TIMER_IDLE_ENTER(%r2)
472	lpswe	__SF_EMPTY(%r15)		# enter enabled wait
473.globl psw_idle_exit
474psw_idle_exit:
475	BR_EX	%r14
476ENDPROC(psw_idle)
477
478/*
479 * Machine check handler routines
 *
 * A machine check may have corrupted architectural state; the handler
 * first revalidates registers, clock comparator, control registers
 * and FP/VX state from the hardware save areas, then checks the
 * machine check code (MCCK_CODE) validity bits.  Anything that cannot
 * be recovered ends up at .Lmcck_panic on the nodat stack.
480 */
481ENTRY(mcck_int_handler)
482	STCK	__LC_MCCK_CLOCK
483	BPOFF
484	la	%r1,4095		# validate r1
485	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
486	sckc	__LC_CLOCK_COMPARATOR			# validate comparator
487	lam	%a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs
488	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
489	lg	%r12,__LC_CURRENT
490	lmg	%r8,%r9,__LC_MCK_OLD_PSW
491	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
492	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
493	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
494	jno	.Lmcck_panic		# control registers invalid -> panic
495	la	%r14,4095
496	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
497	ptlb
498	lg	%r11,__LC_MCESAD-4095(%r14) # extended machine check save area
499	nill	%r11,0xfc00		# MCESA_ORIGIN_MASK
500	TSTMSK	__LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
501	jno	0f
502	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_GS_VALID
503	jno	0f
	# reload guarded storage controls (LGSC) from the extended area
504	.insn	 rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
5050:	l	%r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
506	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_FC_VALID
507	jo	0f
508	sr	%r14,%r14		# FPC invalid -> reset to zero
5090:	sfpc	%r14
510	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
511	jo	0f
	# no vector facility: restore the 16 FP registers individually
512	lghi	%r14,__LC_FPREGS_SAVE_AREA
513	ld	%f0,0(%r14)
514	ld	%f1,8(%r14)
515	ld	%f2,16(%r14)
516	ld	%f3,24(%r14)
517	ld	%f4,32(%r14)
518	ld	%f5,40(%r14)
519	ld	%f6,48(%r14)
520	ld	%f7,56(%r14)
521	ld	%f8,64(%r14)
522	ld	%f9,72(%r14)
523	ld	%f10,80(%r14)
524	ld	%f11,88(%r14)
525	ld	%f12,96(%r14)
526	ld	%f13,104(%r14)
527	ld	%f14,112(%r14)
528	ld	%f15,120(%r14)
529	j	1f
	# vector facility: restore all 32 vector registers
5300:	VLM	%v0,%v15,0,%r11
531	VLM	%v16,%v31,256,%r11
5321:	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
533	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
534	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
535	jo	3f
	# CPU timer invalid: pick the most recent known-good timer value
536	la	%r14,__LC_SYS_ENTER_TIMER
537	clc	0(8,%r14),__LC_EXIT_TIMER
538	jl	1f
539	la	%r14,__LC_EXIT_TIMER
5401:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
541	jl	2f
542	la	%r14,__LC_LAST_UPDATE_TIMER
5432:	spt	0(%r14)
544	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
5453:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
546	jno	.Lmcck_panic
547	tmhh	%r8,0x0001		# interrupting from user ?
548	jnz	4f
549	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
550	jno	.Lmcck_panic
5514:	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
552	tmhh	%r8,0x0001			# interrupting from user ?
553	jnz	.Lmcck_user
554#if IS_ENABLED(CONFIG_KVM)
555	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,.Lmcck_stack
	# NOTE(review): the [.Lsie_entry,.Lsie_skip) range also covers the
	# BPOFF/BPENTER instructions after the SIE instruction itself —
	# confirm _CIF_MCCK_GUEST is intended for those addresses too.
556	OUTSIDE	%r9,.Lsie_entry,.Lsie_skip,5f
557	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
5585:	brasl	%r14,.Lcleanup_sie
559#endif
560	j	.Lmcck_stack
561.Lmcck_user:
562	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
563.Lmcck_stack:
564	lg	%r15,__LC_MCCK_STACK
565.Lmcck_skip:
566	la	%r11,STACK_FRAME_OVERHEAD(%r15)
567	stctg	%c1,%c1,__PT_CR1(%r11)	# remember entry asce for the return path
568	lctlg	%c1,%c1,__LC_KERNEL_ASCE
569	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
570	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
571	stmg	%r0,%r7,__PT_R0(%r11)
572	# clear user controlled registers to prevent speculative use
573	xgr	%r0,%r0
574	xgr	%r1,%r1
575	xgr	%r3,%r3
576	xgr	%r4,%r4
577	xgr	%r5,%r5
578	xgr	%r6,%r6
579	xgr	%r7,%r7
580	xgr	%r10,%r10
581	mvc	__PT_R8(64,%r11),0(%r14)
582	stmg	%r8,%r9,__PT_PSW(%r11)
583	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
584	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
585	lgr	%r2,%r11		# pass pointer to pt_regs
586	brasl	%r14,s390_do_machine_check
587	cghi	%r2,0			# nonzero -> run s390_handle_mcck
588	je	.Lmcck_return
589	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
590	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
591	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
592	la	%r11,STACK_FRAME_OVERHEAD(%r1)
593	lgr	%r15,%r1
594	brasl	%r14,s390_handle_mcck
595.Lmcck_return:
596	lctlg	%c1,%c1,__PT_CR1(%r11)	# restore the asce saved on entry
597	lmg	%r0,%r10,__PT_R0(%r11)
598	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
599	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
600	jno	0f
601	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
602	stpt	__LC_EXIT_TIMER
6030:	lmg	%r11,%r15,__PT_R11(%r11)
604	b	__LC_RETURN_MCCK_LPSWE
605
606.Lmcck_panic:
607	lg	%r15,__LC_NODAT_STACK
608	j	.Lmcck_skip
609ENDPROC(mcck_int_handler)
610
611#
612# PSW restart interrupt handler
613#
# Builds a pt_regs on the restart stack and calls the function from
# __LC_RESTART_FN with __LC_RESTART_DATA as argument.  If the restart
# came from another (non-negative) cpu, that source cpu is sense'd
# until it has stopped.  The handler never returns: the current cpu
# is stopped via sigp afterwards.
614ENTRY(restart_int_handler)
615	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
616	stg	%r15,__LC_SAVE_AREA_RESTART
617	lg	%r15,__LC_RESTART_STACK
618	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
619	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
620	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
621	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
622	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
623	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
624	lg	%r2,__LC_RESTART_DATA
625	lg	%r3,__LC_RESTART_SOURCE
626	ltgr	%r3,%r3				# test source cpu address
627	jm	1f				# negative -> skip source stop
6280:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
629	brc	10,0b				# wait for status stored
6301:	basr	%r14,%r1			# call function
631	stap	__SF_EMPTY(%r15)		# store cpu address
632	llgh	%r3,__SF_EMPTY(%r15)
6332:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
634	brc	2,2b				# retry while busy
6353:	j	3b				# should not be reached
636ENDPROC(restart_int_handler)
637
638	.section .kprobes.text, "ax"
639
640#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
641/*
642 * The synchronous or the asynchronous stack overflowed. We are dead.
643 * No need to properly save the registers, we are going to panic anyway.
644 * Setup a pt_regs so that show_trace can provide a good call trace.
 *
 * Reached only from CHECK_STACK / CHECK_VMAP_STACK, which load %r14
 * with the lowcore save area that holds the original %r8-%r15.
645 */
646ENTRY(stack_overflow)
647	lg	%r15,__LC_NODAT_STACK	# change to panic stack
648	la	%r11,STACK_FRAME_OVERHEAD(%r15)
649	stmg	%r0,%r7,__PT_R0(%r11)
650	stmg	%r8,%r9,__PT_PSW(%r11)
651	mvc	__PT_R8(64,%r11),0(%r14)	# %r14 = lowcore save area
652	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
653	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
654	lgr	%r2,%r11		# pass pointer to pt_regs
655	jg	kernel_stack_overflow
656ENDPROC(stack_overflow)
657#endif
658
659#if IS_ENABLED(CONFIG_KVM)
# Called (brasl) from the interrupt/mcck handlers when the old PSW
# points into the SIE critical section: re-enter host branch
# prediction rules, mark the control block as no longer in SIE,
# reload the kernel asce and leave %r9 pointing at sie_exit so the
# caller treats the interrupt as if it happened there.
660.Lcleanup_sie:
661	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
662	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
663	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
664	lctlg	%c1,%c1,__LC_KERNEL_ASCE
665	larl	%r9,sie_exit			# skip forward to sie_exit
666	BR_EX	%r14,%r13
667#endif
668	.section .rodata, "a"
# System call table: asm/syscall_table.h expands one SYSCALL() entry
# per syscall number; here each expands to the 64-bit handler pointer.
669#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
670	.globl	sys_call_table
671sys_call_table:
672#include "asm/syscall_table.h"
673#undef SYSCALL
674
675#ifdef CONFIG_COMPAT
676
# Same table for 31-bit compat tasks, using the compat entry points.
677#define SYSCALL(esame,emu)	.quad __s390_ ## emu
678	.globl	sys_call_table_emu
679sys_call_table_emu:
680#include "asm/syscall_table.h"
681#undef SYSCALL
682#endif