xref: /linux/arch/s390/kernel/entry.S (revision d4fffba4d04b8d605ff07f1ed987399f6af0ad5b)
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 *    S390 low-level entry points.
4 *
5 *    Copyright IBM Corp. 1999, 2012
6 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 *		 Hartmut Penner (hp@de.ibm.com),
8 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
9 */
10
11#include <linux/init.h>
12#include <linux/linkage.h>
13#include <asm/asm-extable.h>
14#include <asm/alternative-asm.h>
15#include <asm/processor.h>
16#include <asm/cache.h>
17#include <asm/dwarf.h>
18#include <asm/errno.h>
19#include <asm/ptrace.h>
20#include <asm/thread_info.h>
21#include <asm/asm-offsets.h>
22#include <asm/unistd.h>
23#include <asm/page.h>
24#include <asm/sigp.h>
25#include <asm/irq.h>
26#include <asm/vx-insn.h>
27#include <asm/setup.h>
28#include <asm/nmi.h>
29#include <asm/export.h>
30#include <asm/nospec-insn.h>
31
32STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
33STACK_SIZE  = 1 << STACK_SHIFT
# STACK_INIT: offset of the topmost usable frame within a kernel stack;
# room is reserved for a struct pt_regs plus the standard frame overhead.
34STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
35
# Lowcore offset of the LPP save area, used by the "lpp" alternatives below.
36_LPP_OFFSET	= __LC_LPP
37
38	.macro STBEAR address
	# Store the breaking-event address register to \address.
	# Resolved by ALTERNATIVE: a nop unless facility 193 is installed.
39	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", 193
40	.endm
41
42	.macro LBEAR address
	# Load the breaking-event address register from \address.
	# Resolved by ALTERNATIVE: a nop unless facility 193 is installed.
43	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", 193
44	.endm
45
46	.macro LPSWEY address,lpswe
	# Load a new PSW from \address. With facility 193 installed the
	# LPSWEY instruction is used directly; otherwise branch to \lpswe
	# (NOTE(review): presumably a lowcore trampoline containing a
	# plain lpswe -- confirm against lowcore layout).
47	ALTERNATIVE "b \lpswe; nopr", ".insn siy,0xeb0000000071,\address,0", 193
48	.endm
49
50	.macro MBEAR reg
	# Copy the last-break address from lowcore into the pt_regs
	# addressed by \reg; a 6-byte nop unless facility 193 is installed.
51	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
52	.endm
53
54	.macro	CHECK_STACK savearea
	# Branch to stack_overflow (with %r14 = \savearea, the lowcore
	# save area holding the interrupted gprs) if %r15 points into the
	# stack guard area. Compiled out without CONFIG_CHECK_STACK.
55#ifdef CONFIG_CHECK_STACK
56	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
57	lghi	%r14,\savearea
58	jz	stack_overflow
59#endif
60	.endm
61
62	.macro	CHECK_VMAP_STACK savearea,oklabel
	# Verify that %r15 lies within one of the known per-cpu kernel
	# stacks: reconstruct the STACK_INIT value for the stack that
	# would contain %r15 and compare it with each stack pointer kept
	# in lowcore. On a match jump to \oklabel, otherwise go to
	# stack_overflow with %r14 = \savearea. Without CONFIG_VMAP_STACK
	# the check is skipped and \oklabel is always taken.
63#ifdef CONFIG_VMAP_STACK
64	lgr	%r14,%r15
65	nill	%r14,0x10000 - STACK_SIZE	# round down to stack start
66	oill	%r14,STACK_INIT			# ... plus initial offset
67	clg	%r14,__LC_KERNEL_STACK
68	je	\oklabel
69	clg	%r14,__LC_ASYNC_STACK
70	je	\oklabel
71	clg	%r14,__LC_MCCK_STACK
72	je	\oklabel
73	clg	%r14,__LC_NODAT_STACK
74	je	\oklabel
75	clg	%r14,__LC_RESTART_STACK
76	je	\oklabel
77	lghi	%r14,\savearea
78	j	stack_overflow
79#else
80	j	\oklabel
81#endif
82	.endm
83
84	/*
85	 * The TSTMSK macro generates a test-under-mask instruction by
86	 * calculating the memory offset for the specified mask value.
87	 * Mask value can be any constant.  The macro shifts the mask
88	 * value to calculate the memory offset for the test-under-mask
89	 * instruction.
90	 */
91	.macro TSTMSK addr, mask, size=8, bytepos=0
92		.if (\bytepos < \size) && (\mask >> 8)
93			.if (\mask & 0xff)
94				.error "Mask exceeds byte boundary"
95			.endif
			# recurse with the mask shifted down by one byte
96			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
97			.exitm
98		.endif
99		.ifeq \mask
100			.error "Mask must not be zero"
101		.endif
		# byte offset within the \size-byte field for this mask byte
102		off = \size - \bytepos - 1
103		tm	off+\addr, \mask
104	.endm
105
106	.macro BPOFF
	# Branch prediction off (same insn form as BPEXIT's m3=12 case);
	# a nop unless alternative 82 is applied at runtime.
107	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", 82
108	.endm
109
110	.macro BPON
	# Branch prediction on (m3=13 variant of the same insn as BPOFF);
	# a nop unless alternative 82 is applied at runtime.
111	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", 82
112	.endm
113
114	.macro BPENTER tif_ptr,tif_mask
	# Kernel entry: execute the BPON insn (m3=13) if any bit of
	# \tif_mask is set in \tif_ptr. When alternative 82 is applied
	# the whole sequence is replaced by a jump over it.
115	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
116		    "j .+12; nop; nop", 82
117	.endm
118
119	.macro BPEXIT tif_ptr,tif_mask
	# Exit to user/guest: default variant executes the BPOFF insn
	# (m3=12) when a \tif_mask bit is set in \tif_ptr; with
	# alternative 82 applied it instead executes the BPON insn
	# (m3=13) when no \tif_mask bit is set.
120	TSTMSK	\tif_ptr,\tif_mask
121	ALTERNATIVE "jz .+8;  .insn rrf,0xb2e80000,0,0,12,0", \
122		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82
123	.endm
124
125#if IS_ENABLED(CONFIG_KVM)
126	/*
127	 * The OUTSIDE macro jumps to the provided label in case the value
128	 * in the provided register is outside of the provided range. The
129	 * macro is useful for checking whether a PSW stored in a register
130	 * pair points inside or outside of a block of instructions.
131	 * @reg: register to check
132	 * @start: start of the range
133	 * @end: end of the range
134	 * @outside_label: jump here if @reg is outside of [@start..@end)
135	 */
136	.macro OUTSIDE reg,start,end,outside_label
	# Note: clobbers %r13 and %r14.
137	lgr	%r14,\reg
138	larl	%r13,\start
139	slgr	%r14,%r13			# %r14 = \reg - \start
140#ifdef CONFIG_AS_IS_LLVM
	# With the LLVM integrated assembler the range size is read from a
	# .rodata literal instead of being encoded as an immediate --
	# NOTE(review): presumably because llvm-as cannot evaluate
	# \end - \start here; confirm.
141	clgfrl	%r14,.Lrange_size\@
142#else
143	clgfi	%r14,\end - \start
144#endif
145	jhe	\outside_label			# unsigned >= size -> outside
146#ifdef CONFIG_AS_IS_LLVM
147	.section .rodata, "a"
148	.align 4
149.Lrange_size\@:
150	.long	\end - \start
151	.previous
152#endif
153	.endm
154
155	.macro SIEEXIT
	# Leave the SIE critical section: clear the in-SIE bit in the
	# control block, restore the kernel ASCE and point %r9 (the saved
	# PSW address used by the callers) at sie_exit.
156	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
157	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
158	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
159	larl	%r9,sie_exit			# skip forward to sie_exit
160	.endm
161#endif
162
163	GEN_BR_THUNK %r14	# emit the branch thunk used by BR_EX %r14
164
165	.section .kprobes.text, "ax"
166.Ldummy:
167	/*
168	 * This nop exists only in order to avoid that __bpon starts at
169	 * the beginning of the kprobes text section. In that case we would
170	 * have several symbols at the same address. E.g. objdump would take
171	 * an arbitrary symbol name when disassembling this code.
172	 * With the added nop in between the __bpon symbol is unique
173	 * again.
174	 */
175	nop	0
176
177ENTRY(__bpon)
	# Turn branch prediction back on (see BPON) and return.
178	.globl __bpon		# NOTE(review): looks redundant if ENTRY
				# already exports the symbol -- confirm
179	BPON
180	BR_EX	%r14
181ENDPROC(__bpon)
182
183/*
184 * Scheduler resume function, called by switch_to
185 *  gpr2 = (task_struct *) prev
186 *  gpr3 = (task_struct *) next
187 * Returns:
188 *  gpr2 = prev
189 */
190ENTRY(__switch_to)
191	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
192	lghi	%r4,__TASK_stack
193	lghi	%r1,__TASK_thread
194	llill	%r5,STACK_INIT			# %r5 = STACK_INIT offset
195	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
196	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
197	agr	%r15,%r5			# end of kernel stack of next
198	stg	%r3,__LC_CURRENT		# store task struct of next
199	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
200	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
201	aghi	%r3,__TASK_pid
202	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
203	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
204	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40 # load program parameter
						 # when facility 40 present
205	BR_EX	%r14
206ENDPROC(__switch_to)
207
208#if IS_ENABLED(CONFIG_KVM)
209/*
210 * sie64a calling convention:
211 * %r2 pointer to sie control block
212 * %r3 guest register save area
213 */
214ENTRY(sie64a)
215	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
216	lg	%r12,__LC_CURRENT
217	stg	%r2,__SF_SIE_CONTROL(%r15)	# save control block pointer
218	stg	%r3,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
219	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
220	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
221	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
222	lg	%r14,__LC_GMAP			# get gmap pointer
223	ltgr	%r14,%r14
224	jz	.Lsie_gmap			# no gmap -> keep current asce
225	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
226.Lsie_gmap:
227	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
228	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
229	tm	__SIE_PROG20+3(%r14),3		# last exit...
230	jnz	.Lsie_skip
231	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
232	jo	.Lsie_skip			# exit if fp/vx regs changed
233	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
# .Lsie_entry/.Lsie_leave delimit the range the machine check handler
# tests with OUTSIDE to detect an interruption of the guest itself.
234.Lsie_entry:
235	sie	0(%r14)
236# Let the next instruction be NOP to avoid triggering a machine check
237# and handling it in a guest as result of the instruction execution.
238	nopr	7
239.Lsie_leave:
240	BPOFF
241	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
242.Lsie_skip:
243	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
244	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
245.Lsie_done:
246# some program checks are suppressing. C code (e.g. do_protection_exception)
247# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
248# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
249# Other instructions between sie64a and .Lsie_done should not cause program
250# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
251.Lrewind_pad6:
252	nopr	7
253.Lrewind_pad4:
254	nopr	7
255.Lrewind_pad2:
256	nopr	7
257	.globl sie_exit
258sie_exit:
259	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
260	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
261	xgr	%r0,%r0				# clear guest registers to
262	xgr	%r1,%r1				# prevent speculative use
263	xgr	%r3,%r3
264	xgr	%r4,%r4
265	xgr	%r5,%r5
266	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
267	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
268	BR_EX	%r14
269.Lsie_fault:
270	lghi	%r14,-EFAULT
271	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
272	j	sie_exit
273
# Map every possible rewind landing point (the nop pad above and
# sie_exit itself) to .Lsie_fault.
274	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
275	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
276	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
277	EX_TABLE(sie_exit,.Lsie_fault)
278ENDPROC(sie64a)
279EXPORT_SYMBOL(sie64a)
280EXPORT_SYMBOL(sie_exit)
281#endif
282
283/*
284 * SVC interrupt handler routine. System calls are synchronous events and
285 * are entered with interrupts disabled.
286 */
287
288ENTRY(system_call)
289	stpt	__LC_SYS_ENTER_TIMER
290	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
291	BPOFF
292	lghi	%r14,0			# %r14 = 0: not single-stepped
# .Lpgm_svcper re-enters at .Lsysc_per with %r14 = 1 for a PER
# single-stepped system call.
293.Lsysc_per:
294	STBEAR	__LC_LAST_BREAK
295	lctlg	%c1,%c1,__LC_KERNEL_ASCE
296	lg	%r12,__LC_CURRENT
297	lg	%r15,__LC_KERNEL_STACK
298	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
299	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
300	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
301	# clear user controlled register to prevent speculative use
302	xgr	%r0,%r0
303	xgr	%r1,%r1
304	xgr	%r4,%r4
305	xgr	%r5,%r5
306	xgr	%r6,%r6
307	xgr	%r7,%r7
308	xgr	%r8,%r8
309	xgr	%r9,%r9
310	xgr	%r10,%r10
311	xgr	%r11,%r11
312	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
313	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
314	MBEAR	%r2
315	lgr	%r3,%r14		# 2nd arg: single-step flag
316	brasl	%r14,__do_syscall
317	lctlg	%c1,%c1,__LC_USER_ASCE
318	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
319	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
320	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
321	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
322	stpt	__LC_EXIT_TIMER
323	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
324ENDPROC(system_call)
325
326#
327# a new process exits the kernel with ret_from_fork
328#
329ENTRY(ret_from_fork)
330	lgr	%r3,%r11		# 2nd arg for __ret_from_fork
					# NOTE(review): %r11 presumably
					# holds pt_regs here -- confirm
331	brasl	%r14,__ret_from_fork
	# return to user space: same exit sequence as system_call
332	lctlg	%c1,%c1,__LC_USER_ASCE
333	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
334	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
335	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
336	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
337	stpt	__LC_EXIT_TIMER
338	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
339ENDPROC(ret_from_fork)
340
341/*
342 * Program check handler routine
343 */
344
345ENTRY(pgm_check_handler)
346	stpt	__LC_SYS_ENTER_TIMER
347	BPOFF
348	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
349	lg	%r12,__LC_CURRENT
350	lghi	%r10,0			# %r10 = pt_regs flags, may become
					# _PIF_GUEST_FAULT below
351	lmg	%r8,%r9,__LC_PGM_OLD_PSW
352	tmhh	%r8,0x0001		# coming from user space?
353	jno	.Lpgm_skip_asce
354	lctlg	%c1,%c1,__LC_KERNEL_ASCE
355	j	3f			# -> fault in user space
356.Lpgm_skip_asce:
357#if IS_ENABLED(CONFIG_KVM)
358	# cleanup critical section for program checks in sie64a
359	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,1f
360	SIEEXIT
361	lghi	%r10,_PIF_GUEST_FAULT
362#endif
3631:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
364	jnz	2f			# -> enabled, can't be a double fault
365	tm	__LC_PGM_ILC+3,0x80	# check for per exception
366	jnz	.Lpgm_svcper		# -> single stepped svc
3672:	CHECK_STACK __LC_SAVE_AREA_SYNC
368	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) # carve pt_regs + frame
369	# CHECK_VMAP_STACK branches to stack_overflow or 4f
370	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
3713:	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
372	lg	%r15,__LC_KERNEL_STACK
3734:	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# %r11 = pt_regs
374	stg	%r10,__PT_FLAGS(%r11)
375	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
376	stmg	%r0,%r7,__PT_R0(%r11)
377	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
378	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
379	stmg	%r8,%r9,__PT_PSW(%r11)
380
381	# clear user controlled registers to prevent speculative use
382	xgr	%r0,%r0
383	xgr	%r1,%r1
384	xgr	%r3,%r3
385	xgr	%r4,%r4
386	xgr	%r5,%r5
387	xgr	%r6,%r6
388	xgr	%r7,%r7
389	lgr	%r2,%r11		# pass pointer to pt_regs
390	brasl	%r14,__do_pgm_check
391	tmhh	%r8,0x0001		# returning to user space?
392	jno	.Lpgm_exit_kernel
393	lctlg	%c1,%c1,__LC_USER_ASCE
394	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
395	stpt	__LC_EXIT_TIMER
396.Lpgm_exit_kernel:
397	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
398	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
399	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
400	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
401
402#
403# single stepped system call
404#
405.Lpgm_svcper:
406	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
407	larl	%r14,.Lsysc_per
408	stg	%r14,__LC_RETURN_PSW+8
409	lghi	%r14,1			# %r14 = 1: single-stepped svc
410	LBEAR	__LC_PGM_LAST_BREAK
411	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
412ENDPROC(pgm_check_handler)
413
414/*
415 * Interrupt handler macro used for external and IO interrupts.
416 */
417.macro INT_HANDLER name,lc_old_psw,handler
	# Common first-level handler for external and I/O interrupts.
	# \name       - symbol name of the generated entry point
	# \lc_old_psw - lowcore location of the interruption old PSW
	# \handler    - C function called with %r2 = pt_regs
418ENTRY(\name)
419	stckf	__LC_INT_CLOCK
420	stpt	__LC_SYS_ENTER_TIMER
421	STBEAR	__LC_LAST_BREAK
422	BPOFF
423	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
424	lg	%r12,__LC_CURRENT
425	lmg	%r8,%r9,\lc_old_psw
426	tmhh	%r8,0x0001			# interrupting from user ?
427	jnz	1f
428#if IS_ENABLED(CONFIG_KVM)
429	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,0f
430	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
431	SIEEXIT
432#endif
4330:	CHECK_STACK __LC_SAVE_AREA_ASYNC
434	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
435	j	2f
4361:	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
437	lctlg	%c1,%c1,__LC_KERNEL_ASCE
438	lg	%r15,__LC_KERNEL_STACK
4392:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
440	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# %r11 = pt_regs
441	stmg	%r0,%r7,__PT_R0(%r11)
442	# clear user controlled registers to prevent speculative use
443	xgr	%r0,%r0
444	xgr	%r1,%r1
445	xgr	%r3,%r3
446	xgr	%r4,%r4
447	xgr	%r5,%r5
448	xgr	%r6,%r6
449	xgr	%r7,%r7
450	xgr	%r10,%r10
451	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
452	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
453	MBEAR	%r11
454	stmg	%r8,%r9,__PT_PSW(%r11)
455	lgr	%r2,%r11		# pass pointer to pt_regs
456	brasl	%r14,\handler
457	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
458	tmhh	%r8,0x0001		# returning to user ?
459	jno	2f
460	lctlg	%c1,%c1,__LC_USER_ASCE
461	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
462	stpt	__LC_EXIT_TIMER
4632:	LBEAR	__PT_LAST_BREAK(%r11)
464	lmg	%r0,%r15,__PT_R0(%r11)
465	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
466ENDPROC(\name)
467.endm
468
469INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
470INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
471
472/*
473 * Load idle PSW.
474 */
475ENTRY(psw_idle)
476	stg	%r14,(__SF_GPRS+8*8)(%r15)	# save %r14 in frame
477	stg	%r3,__SF_EMPTY(%r15)		# PSW mask for the idle lpswe
478	larl	%r1,psw_idle_exit
479	stg	%r1,__SF_EMPTY+8(%r15)		# PSW address: psw_idle_exit
480	larl	%r1,smp_cpu_mtid
481	llgf	%r1,0(%r1)
482	ltgr	%r1,%r1
483	jz	.Lpsw_idle_stcctm		# no multithreading -> skip
	# NOTE(review): the .insn below presumably is stcctm, storing the
	# MT cycle counters at idle entry -- confirm against the opcode.
484	.insn	rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
485.Lpsw_idle_stcctm:
486	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
487	BPON
488	stckf	__CLOCK_IDLE_ENTER(%r2)
489	stpt	__TIMER_IDLE_ENTER(%r2)
490	lpswe	__SF_EMPTY(%r15)		# enter wait PSW built above
491.globl psw_idle_exit
492psw_idle_exit:
493	BR_EX	%r14
494ENDPROC(psw_idle)
495
496/*
497 * Machine check handler routines
498 */
499ENTRY(mcck_int_handler)
500	stckf	__LC_MCCK_CLOCK
501	BPOFF
502	la	%r1,4095		# validate r1
503	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
504	LBEAR	__LC_LAST_BREAK_SAVE_AREA-4095(%r1)		# validate bear
505	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
506	lg	%r12,__LC_CURRENT
507	lmg	%r8,%r9,__LC_MCK_OLD_PSW
508	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
509	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
510	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
511	jno	.Lmcck_panic		# control registers invalid -> panic
512	la	%r14,4095
513	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
514	ptlb
515	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
516	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
517	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
518	jo	3f			# saved cpu timer is valid
	# cpu timer invalid: use the largest of sys-enter / exit /
	# last-update timer as a replacement value
519	la	%r14,__LC_SYS_ENTER_TIMER
520	clc	0(8,%r14),__LC_EXIT_TIMER
521	jl	1f
522	la	%r14,__LC_EXIT_TIMER
5231:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
524	jl	2f
525	la	%r14,__LC_LAST_UPDATE_TIMER
5262:	spt	0(%r14)
527	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
5283:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
529	jno	.Lmcck_panic
530	tmhh	%r8,0x0001		# interrupting from user ?
531	jnz	.Lmcck_user
532	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
533	jno	.Lmcck_panic
534#if IS_ENABLED(CONFIG_KVM)
535	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,.Lmcck_stack
	# mcck within the sie64a critical section; if it hit between
	# .Lsie_entry and .Lsie_leave it interrupted the guest itself
536	OUTSIDE	%r9,.Lsie_entry,.Lsie_leave,4f
537	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
5384:	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
539	SIEEXIT
540	j	.Lmcck_stack
541#endif
542.Lmcck_user:
543	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
544.Lmcck_stack:
545	lg	%r15,__LC_MCCK_STACK
546	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# %r11 = pt_regs
547	stctg	%c1,%c1,__PT_CR1(%r11)
548	lctlg	%c1,%c1,__LC_KERNEL_ASCE
549	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
550	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
551	stmg	%r0,%r7,__PT_R0(%r11)
552	# clear user controlled registers to prevent speculative use
553	xgr	%r0,%r0
554	xgr	%r1,%r1
555	xgr	%r3,%r3
556	xgr	%r4,%r4
557	xgr	%r5,%r5
558	xgr	%r6,%r6
559	xgr	%r7,%r7
560	xgr	%r10,%r10
561	mvc	__PT_R8(64,%r11),0(%r14)	# gprs 8-15 from save area
562	stmg	%r8,%r9,__PT_PSW(%r11)
563	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
564	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
565	lgr	%r2,%r11		# pass pointer to pt_regs
566	brasl	%r14,s390_do_machine_check
567	cghi	%r2,0			# zero -> no further handling
568	je	.Lmcck_return
569	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
570	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
571	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
572	la	%r11,STACK_FRAME_OVERHEAD(%r1)
573	lgr	%r2,%r11		# pass pointer to copied pt_regs
574	lgr	%r15,%r1
575	brasl	%r14,s390_handle_mcck
576.Lmcck_return:
577	lctlg	%c1,%c1,__PT_CR1(%r11)	# restore interrupted asce
578	lmg	%r0,%r10,__PT_R0(%r11)
579	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
580	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
581	jno	0f
582	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
583	stpt	__LC_EXIT_TIMER
5840:	ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
585	LBEAR	0(%r12)
586	lmg	%r11,%r15,__PT_R11(%r11)
587	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE
588
589.Lmcck_panic:
590	/*
591	 * Iterate over all possible CPU addresses in the range 0..0xffff
592	 * and stop each CPU using signal processor. Use compare and swap
593	 * to allow just one CPU-stopper and prevent concurrent CPUs from
594	 * stopping each other while leaving the others running.
595	 */
596	lhi	%r5,0
597	lhi	%r6,1
598	larl	%r7,.Lstop_lock
599	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
600	jnz	4f			# lost the race -> just stop
601	larl	%r7,.Lthis_cpu
602	stap	0(%r7)			# this CPU address
603	lh	%r4,0(%r7)
604	nilh	%r4,0
605	lhi	%r0,1
606	sll	%r0,16			# CPU counter
607	lhi	%r3,0			# next CPU address
6080:	cr	%r3,%r4
609	je	2f			# skip our own CPU address
6101:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
611	brc	SIGP_CC_BUSY,1b
6122:	ahi	%r3,1
613	brct	%r0,0b
6143:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
615	brc	SIGP_CC_BUSY,3b
6164:	j	4b			# loop forever
617ENDPROC(mcck_int_handler)
618
619ENTRY(restart_int_handler)
	# Restart interrupt: call the function stored in lowcore
	# (__LC_RESTART_FN) on the restart stack, then stop this CPU.
620	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
621	stg	%r15,__LC_SAVE_AREA_RESTART
622	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4 # 4-byte flags field
623	jz	0f
624	la	%r15,4095
625	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)
6260:	larl	%r15,.Lstosm_tmp
627	stosm	0(%r15),0x04			# turn dat on, keep irqs off
628	lg	%r15,__LC_RESTART_STACK
629	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
630	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
631	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
632	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
633	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
634	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
635	lg	%r2,__LC_RESTART_DATA
636	lgf	%r3,__LC_RESTART_SOURCE
637	ltgr	%r3,%r3				# test source cpu address
638	jm	1f				# negative -> skip source stop
6390:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
640	brc	10,0b				# wait for status stored
6411:	basr	%r14,%r1			# call function
642	stap	__SF_EMPTY(%r15)		# store cpu address
643	llgh	%r3,__SF_EMPTY(%r15)
6442:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
645	brc	2,2b
6463:	j	3b				# loop forever
647ENDPROC(restart_int_handler)
648
649	.section .kprobes.text, "ax"
650
651#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
652/*
653 * The synchronous or the asynchronous stack overflowed. We are dead.
654 * No need to properly save the registers, we are going to panic anyway.
655 * Setup a pt_regs so that show_trace can provide a good call trace.
656 */
657ENTRY(stack_overflow)
658	lg	%r15,__LC_NODAT_STACK	# change to panic stack
659	la	%r11,STACK_FRAME_OVERHEAD(%r15)
660	stmg	%r0,%r7,__PT_R0(%r11)
661	stmg	%r8,%r9,__PT_PSW(%r11)
662	mvc	__PT_R8(64,%r11),0(%r14)
663	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
664	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
665	lgr	%r2,%r11		# pass pointer to pt_regs
666	jg	kernel_stack_overflow
667ENDPROC(stack_overflow)
668#endif
669
670	.section .data, "aw"
671		.align	4
672.Lstop_lock:	.long	0	# mcck panic: CPU-stopper cmpxchg lock
673.Lthis_cpu:	.short	0	# mcck panic: own CPU address (stap)
674.Lstosm_tmp:	.byte	0	# scratch byte for stosm in restart handler
675	.section .rodata, "a"
# System call tables: one .quad per entry, expanded from
# asm/syscall_table.h with SYSCALL() selecting the 64-bit entry point
# (and below, under CONFIG_COMPAT, the compat entry point).
676#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
677	.globl	sys_call_table
678sys_call_table:
679#include "asm/syscall_table.h"
680#undef SYSCALL
681
682#ifdef CONFIG_COMPAT
683
684#define SYSCALL(esame,emu)	.quad __s390_ ## emu
685	.globl	sys_call_table_emu
686sys_call_table_emu:
687#include "asm/syscall_table.h"
688#undef SYSCALL
689#endif
690