/* SPDX-License-Identifier: GPL-2.0 */
/*
 *    S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/ctl_reg.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>

__PT_R0      =	__PT_GPRS
__PT_R1      =	__PT_GPRS + 8
__PT_R2      =	__PT_GPRS + 16
__PT_R3      =	__PT_GPRS + 24
__PT_R4      =	__PT_GPRS + 32
__PT_R5      =	__PT_GPRS + 40
__PT_R6      =	__PT_GPRS + 48
__PT_R7      =	__PT_GPRS + 56
__PT_R8      =	__PT_GPRS + 64
__PT_R9      =	__PT_GPRS + 72
__PT_R10     =	__PT_GPRS + 80
__PT_R11     =	__PT_GPRS + 88
__PT_R12     =	__PT_GPRS + 96
__PT_R13     =	__PT_GPRS + 104
__PT_R14     =	__PT_GPRS + 112
__PT_R15     =	__PT_GPRS + 120
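
# Each __PT_Rn above is the byte offset of 64-bit general register n
# within the gprs array of struct pt_regs (8 bytes per register).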

STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
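
# Worked example (configuration dependent): assuming 4KB pages
# (PAGE_SHIFT = 12) and THREAD_SIZE_ORDER = 2, STACK_SHIFT is 14 and
# STACK_SIZE is 16KB; STACK_INIT is then the initial stack pointer,
# leaving room for a pt_regs area plus the standard frame overhead at
# the top of the stack.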

_TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		   _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING | \
		   _TIF_NOTIFY_SIGNAL)
_TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		   _TIF_SYSCALL_TRACEPOINT)
_CIF_WORK	= (_CIF_FPU)
_PIF_WORK	= (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
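
# Note: _TIF_* bits live in the thread_info flags (__TI_flags), _CIF_*
# bits in the lowcore CPU flags (__LC_CPU_FLAGS) and _PIF_* bits in
# pt_regs (__PT_FLAGS); each *_WORK mask collects the bits tested on
# the corresponding exit paths below.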

_LPP_OFFSET	= __LC_LPP

	.macro	TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_on_caller
#endif
	.endm

	.macro	TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_off_caller
#endif
	.endm

	.macro	LOCKDEP_SYS_EXIT
#ifdef CONFIG_LOCKDEP
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jz	.+10
	brasl	%r14,lockdep_sys_exit
#endif
	.endm

	.macro	CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm
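
	/*
	 * Sketch of the test above, assuming STACK_SIZE and
	 * CONFIG_STACK_GUARD are powers of 2: tml checks the bits of %r15
	 * between the guard size and the stack size; if they are all zero
	 * the stack pointer has dropped into the guard area and we branch
	 * to stack_overflow, with the save area address preloaded in %r14.
	 */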

	.macro	DEBUG_USER_ASCE
#ifdef CONFIG_DEBUG_USER_ASCE
	brasl	%r14,debug_user_asce
#endif
	.endm

	.macro	CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - STACK_SIZE
	oill	%r14,STACK_INIT
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm
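
	/*
	 * How the check above works (sketch): %r14 is %r15 rounded down
	 * to its stack base (nill with the 16-bit two's complement of
	 * STACK_SIZE) with STACK_INIT ored in, i.e. the canonical initial
	 * stack pointer of the stack %r15 currently points into. If that
	 * value matches none of the known kernel stacks, %r15 is corrupt
	 * and we branch to stack_overflow.
	 */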

	.macro	SWITCH_ASYNC savearea,timer,clock
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	4f
#if IS_ENABLED(CONFIG_KVM)
	lgr	%r14,%r9
	larl	%r13,.Lsie_gmap
	slgr	%r14,%r13
	lghi	%r13,.Lsie_done - .Lsie_gmap
	clgr	%r14,%r13
	jhe	0f
	lghi	%r11,\savearea		# inside critical section, do cleanup
	brasl	%r14,.Lcleanup_sie
#endif
0:	larl	%r13,.Lpsw_idle_exit
	cgr	%r13,%r9
	jne	3f

	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	2f			# no SMT, skip mt_cycles calculation
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
	larl	%r3,mt_cycles
	ag	%r3,__LC_PERCPU_OFFSET
	la	%r4,__SF_EMPTY+16(%r15)
1:	lg	%r0,0(%r3)
	slg	%r0,0(%r4)
	alg	%r0,64(%r4)
	stg	%r0,0(%r3)
	la	%r3,8(%r3)
	la	%r4,8(%r4)
	brct	%r1,1b

2:	mvc	__CLOCK_IDLE_EXIT(8,%r2), \clock
	mvc	__TIMER_IDLE_EXIT(8,%r2), \timer
	# account system time going idle
	ni	__LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT

	lg	%r13,__LC_STEAL_TIMER
	alg	%r13,__CLOCK_IDLE_ENTER(%r2)
	slg	%r13,__LC_LAST_UPDATE_CLOCK
	stg	%r13,__LC_STEAL_TIMER

	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)

	lg	%r13,__LC_SYSTEM_TIMER
	alg	%r13,__LC_LAST_UPDATE_TIMER
	slg	%r13,__TIMER_IDLE_ENTER(%r2)
	stg	%r13,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)

	nihh	%r8,0xfcfd		# clear wait state and irq bits
3:	lg	%r14,__LC_ASYNC_STACK	# are we already on the target stack?
	slgr	%r14,%r15
	srag	%r14,%r14,STACK_SHIFT
	jnz	5f
	CHECK_STACK \savearea
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	6f
4:	UPDATE_VTIME %r14,%r15,\timer
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
5:	lg	%r15,__LC_ASYNC_STACK	# load async stack
6:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	.endm
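
	/*
	 * In short, SWITCH_ASYNC distinguishes roughly three interrupt
	 * origins: user space (label 4: vtime update), the SIE critical
	 * section (cleanup via .Lcleanup_sie) and psw_idle (idle
	 * clock/timer accounting), and then leaves %r15 on the async
	 * stack with %r11 addressing the pt_regs area.
	 */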

	.macro UPDATE_VTIME w1,w2,enter_timer
	lg	\w1,__LC_EXIT_TIMER
	lg	\w2,__LC_LAST_UPDATE_TIMER
	slg	\w1,\enter_timer
	slg	\w2,__LC_EXIT_TIMER
	alg	\w1,__LC_USER_TIMER
	alg	\w2,__LC_SYSTEM_TIMER
	stg	\w1,__LC_USER_TIMER
	stg	\w2,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
	.endm
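
	/*
	 * Accounting sketch for UPDATE_VTIME (the CPU timer counts down,
	 * so an elapsed interval is the old value minus the new one):
	 *	user_timer   += exit_timer - enter_timer
	 *	system_timer += last_update_timer - exit_timer
	 *	last_update_timer = enter_timer
	 */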

	.macro RESTORE_SM_CLEAR_PER
	stg	%r8,__LC_RETURN_PSW
	ni	__LC_RETURN_PSW,0xbf
	ssm	__LC_RETURN_PSW
	.endm

	.macro ENABLE_INTS
	stosm	__SF_EMPTY(%r15),3
	.endm

	.macro ENABLE_INTS_TRACE
	TRACE_IRQS_ON
	ENABLE_INTS
	.endm

	.macro DISABLE_INTS
	stnsm	__SF_EMPTY(%r15),0xfc
	.endm

	.macro DISABLE_INTS_TRACE
	DISABLE_INTS
	TRACE_IRQS_OFF
	.endm

	.macro STCK savearea
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
	.insn	s,0xb27c0000,\savearea		# store clock fast
#else
	.insn	s,0xb2050000,\savearea		# store clock
#endif
	.endm

	/*
	 * The TSTMSK macro generates a test-under-mask instruction for an
	 * arbitrary constant mask value: it shifts the mask down byte by
	 * byte to find the single non-zero mask byte and derives the
	 * memory offset of the byte to test.
	 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
		.if (\bytepos < \size) && (\mask >> 8)
			.if (\mask & 0xff)
				.error "Mask exceeds byte boundary"
			.endif
			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
			.exitm
		.endif
		.ifeq \mask
			.error "Mask must not be zero"
		.endif
		off = \size - \bytepos - 1
		tm	off+\addr, \mask
	.endm
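
	/*
	 * Example expansion (illustrative): TSTMSK __LC_CPU_FLAGS,_CIF_FPU
	 * with the default size=8 emits "tm 7+__LC_CPU_FLAGS,_CIF_FPU",
	 * i.e. it tests the last byte of the 8-byte field; a mask such as
	 * 0x0100 recurses once and tests byte 6 with mask 0x01 instead.
	 */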

	.macro BPOFF
	ALTERNATIVE "", ".long 0xb2e8c000", 82
	.endm

	.macro BPON
	ALTERNATIVE "", ".long 0xb2e8d000", 82
	.endm

	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
		    "", 82
	.endm

	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8;  .long 0xb2e8c000", \
		    "jnz .+8; .long 0xb2e8d000", 82
	.endm
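
	/*
	 * Note (assumption): 0xb2e8 encodes the PPA (perform processor
	 * assist) instruction; the ...c000/...d000 variants above select
	 * different branch-prediction modes as part of the Spectre
	 * mitigations, and the ALTERNATIVEs are patched in only when
	 * facility 82 is installed.
	 */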

	GEN_BR_THUNK %r9
	GEN_BR_THUNK %r14
	GEN_BR_THUNK %r14,%r11

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * This nop exists only to avoid __bpon starting at the very
	 * beginning of the kprobes text section. In that case we would
	 * have several symbols at the same address. E.g. objdump would
	 * pick an arbitrary symbol name when disassembling this code.
	 * With the added nop in between, the __bpon symbol is unique
	 * again.
	 */
	nop	0

ENTRY(__bpon)
	.globl __bpon
	BPON
	BR_EX	%r14
ENDPROC(__bpon)

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
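/*
 * Seen from C this is roughly (a sketch; the exact declaration lives
 * in the s390 headers):
 *	struct task_struct *__switch_to(struct task_struct *prev,
 *					struct task_struct *next);
 */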
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
	BR_EX	%r14
ENDPROC(__switch_to)

#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
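/*
 * Seen from C this corresponds to (a sketch; the reason code stored in
 * __SF_SIE_REASON becomes the return value):
 *	int sie64a(struct kvm_s390_sie_block *sie_block, u64 *guest_gprs);
 */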
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL(%r15)	# save control block pointer
	stg	%r3,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU		# load guest fp/vx registers ?
	jno	.Lsie_load_guest_gprs
	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
.Lsie_load_guest_gprs:
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
	sie	0(%r14)
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
.Lsie_done:
# Some program checks are suppressing, i.e. the PSW already points past
# the faulting instruction. C code (e.g. do_protection_exception) will
# rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where the ILC is
# unpredictable. Other instructions between sie64a and .Lsie_done should
# not cause program interrupts. So let's use 3 nops as a landing pad for
# all possible rewinds.
# See also .Lcleanup_sie
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(sie64a)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
 */

ENTRY(system_call)
	stpt	__LC_SYNC_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lg	%r12,__LC_CURRENT
	lghi	%r14,_PIF_SYSCALL
.Lsysc_per:
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lghi	%r13,__TASK_thread
	lg	%r15,__LC_KERNEL_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
	stg	%r14,__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	ENABLE_INTS
.Lsysc_do_svc:
	# clear user controlled register to prevent speculative use
	xgr	%r0,%r0
	# load address of system call table
	lg	%r10,__THREAD_sysc_table(%r13,%r12)
	llgh	%r8,__PT_INT_CODE+2(%r11)
	slag	%r8,%r8,3			# shift and test for svc 0
	jnz	.Lsysc_nr_ok
	# svc 0: system call number in %r1
	llgfr	%r1,%r1				# clear high word in r1
	sth	%r1,__PT_INT_CODE+2(%r11)
	cghi	%r1,NR_syscalls
	jnl	.Lsysc_nr_ok
	slag	%r8,%r1,3
.Lsysc_nr_ok:
	stg	%r2,__PT_ORIG_GPR2(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lg	%r9,0(%r8,%r10)			# get system call address
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	jnz	.Lsysc_tracesys
	BASR_EX	%r14,%r9			# call sys_xxxx
	stg	%r2,__PT_R2(%r11)		# store return value

.Lsysc_return:
#ifdef CONFIG_DEBUG_RSEQ
	lgr	%r2,%r11
	brasl	%r14,rseq_syscall
#endif
	LOCKDEP_SYS_EXIT
.Lsysc_tif:
	DISABLE_INTS
	TSTMSK	__PT_FLAGS(%r11),_PIF_WORK
	jnz	.Lsysc_work
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lsysc_work			# check for work
	DEBUG_USER_ASCE
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	TSTMSK	__LC_CPU_FLAGS, _CIF_FPU
	jz	.Lsysc_skip_fpu
	brasl	%r14,load_fpu_regs
.Lsysc_skip_fpu:
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	stpt	__LC_EXIT_TIMER
	lmg	%r0,%r15,__PT_R0(%r11)
	b	__LC_RETURN_LPSWE

#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
	ENABLE_INTS
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jo	.Lsysc_reschedule
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
	jo	.Lsysc_syscall_restart
#ifdef CONFIG_UPROBES
	TSTMSK	__TI_flags(%r12),_TIF_UPROBE
	jo	.Lsysc_uprobe_notify
#endif
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
	jo	.Lsysc_guarded_storage
	TSTMSK	__PT_FLAGS(%r11),_PIF_PER_TRAP
	jo	.Lsysc_singlestep
#ifdef CONFIG_LIVEPATCH
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lsysc_patch_pending	# handle live patching just before
					# signals and possible syscall restart
#endif
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
	jo	.Lsysc_syscall_restart
	TSTMSK	__TI_flags(%r12),(_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)
	jnz	.Lsysc_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lsysc_notify_resume
	j	.Lsysc_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lsysc_reschedule:
	larl	%r14,.Lsysc_return
	jg	schedule

#
# _TIF_SIGPENDING or _TIF_NOTIFY_SIGNAL is set, call do_signal
#
.Lsysc_sigpending:
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
	jno	.Lsysc_return
.Lsysc_do_syscall:
	lghi	%r13,__TASK_thread
	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
	lghi	%r1,0			# svc 0 returns -ENOSYS
	j	.Lsysc_do_svc

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lsysc_notify_resume:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_notify_resume

#
# _TIF_UPROBE is set, call uprobe_notify_resume
#
#ifdef CONFIG_UPROBES
.Lsysc_uprobe_notify:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	uprobe_notify_resume
#endif

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lsysc_guarded_storage:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	gs_load_bc_cb
#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lsysc_patch_pending:
	lg	%r2,__LC_CURRENT	# pass pointer to task struct
	larl	%r14,.Lsysc_return
	jg	klp_update_patch_state
#endif

#
# _PIF_PER_TRAP is set, call do_per_trap
#
.Lsysc_singlestep:
	ni	__PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_per_trap

#
# _PIF_SYSCALL_RESTART is set, repeat the current system call
#
.Lsysc_syscall_restart:
	ni	__PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART
	lmg	%r1,%r7,__PT_R1(%r11)	# load svc arguments
	lg	%r2,__PT_ORIG_GPR2(%r11)
	j	.Lsysc_do_svc

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
.Lsysc_tracesys:
	lgr	%r2,%r11		# pass pointer to pt_regs
	la	%r3,0
	llgh	%r0,__PT_INT_CODE+2(%r11)
	stg	%r0,__PT_R2(%r11)
	brasl	%r14,do_syscall_trace_enter
	lghi	%r0,NR_syscalls
	clgr	%r0,%r2
	jnh	.Lsysc_tracenogo
	sllg	%r8,%r2,3
	lg	%r9,0(%r8,%r10)
	lmg	%r3,%r7,__PT_R3(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lg	%r2,__PT_ORIG_GPR2(%r11)
	BASR_EX	%r14,%r9		# call sys_xxx
	stg	%r2,__PT_R2(%r11)	# store return value
.Lsysc_tracenogo:
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	jz	.Lsysc_return
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_syscall_trace_exit
ENDPROC(system_call)

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	lg	%r12,__LC_CURRENT
	brasl	%r14,schedule_tail
	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
	jne	.Lsysc_tracenogo
	# it's a kernel thread
	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
	la	%r2,0(%r10)
	BASR_EX	%r14,%r9
	j	.Lsysc_tracenogo
ENDPROC(ret_from_fork)

ENTRY(kernel_thread_starter)
	la	%r2,0(%r10)
	BASR_EX	%r14,%r9
	j	.Lsysc_tracenogo
ENDPROC(kernel_thread_starter)

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
	stpt	__LC_SYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r10,__LC_LAST_BREAK
	srag	%r11,%r10,12
	jnz	0f
	/* if __LC_LAST_BREAK is < 4096, it contains one of
	 * the lpswe addresses in lowcore. Set it to 1 (initial state)
	 * to prevent leaking that address to userspace.
	 */
	lghi	%r10,1
0:	lg	%r12,__LC_CURRENT
	lghi	%r11,0
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# coming from user space?
	jno	.Lpgm_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	j	3f
.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for program checks in sie64a
	lgr	%r14,%r9
	larl	%r13,.Lsie_gmap
	slgr	%r14,%r13
	lghi	%r13,.Lsie_done - .Lsie_gmap
	clgr	%r14,%r13
	jhe	1f
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	lghi	%r11,_PIF_GUEST_FAULT
#endif
1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	2f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
2:	CHECK_STACK __LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 5f
	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,5f
3:	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	lg	%r15,__LC_KERNEL_STACK
	lgr	%r14,%r12
	aghi	%r14,__TASK_thread	# pointer to thread_struct
	lghi	%r13,__LC_PGM_TDB
	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
	jz	4f
	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
4:	stg	%r10,__THREAD_last_break(%r14)
5:	lgr	%r13,%r11
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
	mvc	__PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
	stg	%r13,__PT_FLAGS(%r11)
	stg	%r10,__PT_ARGS(%r11)
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jz	6f
	tmhh	%r8,0x0001		# kernel per event ?
	jz	.Lpgm_kprobe
	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
6:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	RESTORE_SM_CLEAR_PER
	larl	%r1,pgm_check_table
	llgh	%r10,__PT_INT_CODE+2(%r11)
	nill	%r10,0x007f
	sll	%r10,3
	je	.Lpgm_return
	lg	%r9,0(%r10,%r1)		# load address of handler routine
	lgr	%r2,%r11		# pass pointer to pt_regs
	BASR_EX	%r14,%r9		# branch to interrupt-handler
.Lpgm_return:
	LOCKDEP_SYS_EXIT
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lpgm_restore
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
	jo	.Lsysc_do_syscall
	j	.Lsysc_tif
.Lpgm_restore:
	DISABLE_INTS
	TSTMSK	__LC_CPU_FLAGS, _CIF_FPU
	jz	.Lpgm_skip_fpu
	brasl	%r14,load_fpu_regs
.Lpgm_skip_fpu:
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	stpt	__LC_EXIT_TIMER
	lmg	%r0,%r15,__PT_R0(%r11)
	b	__LC_RETURN_LPSWE

#
# PER event in supervisor state, must be kprobes
#
.Lpgm_kprobe:
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	RESTORE_SM_CLEAR_PER
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_per_trap
	j	.Lpgm_return

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per
ENDPROC(pgm_check_handler)

/*
 * IO interrupt handler routine
 */
ENTRY(io_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,__LC_IO_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER,__LC_INT_CLOCK
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	tm	__PT_PSW+1(%r11),0x01	# coming from user space?
	jno	.Lio_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
.Lio_skip_asce:
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	TRACE_IRQS_OFF
.Lio_loop:
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,IO_INTERRUPT
	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
	jz	.Lio_call
	lghi	%r3,THIN_INTERRUPT
.Lio_call:
	brasl	%r14,do_IRQ
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
	jz	.Lio_return
	tpi	0
	jz	.Lio_return
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	j	.Lio_loop
.Lio_return:
	LOCKDEP_SYS_EXIT
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lio_work		# there is work to do (signals etc.)
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	jnz	.Lio_work
.Lio_restore:
	TRACE_IRQS_ON
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lio_exit_kernel
	DEBUG_USER_ASCE
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
.Lio_exit_kernel:
	lmg	%r0,%r15,__PT_R0(%r11)
	b	__LC_RETURN_LPSWE
.Lio_done:

#
# There is work to do; find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
# 2) if we return to kernel code and kvm is enabled check if we need to
#    modify the psw to leave SIE
# 3) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
.Lio_work:
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jo	.Lio_work_user		# yes -> do resched & signal
#ifdef CONFIG_PREEMPTION
	# check for preemptive scheduling
	icm	%r0,15,__LC_PREEMPT_COUNT
	jnz	.Lio_restore		# preemption is disabled
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jno	.Lio_restore
	# switch to kernel stack
	lg	%r1,__PT_R15(%r11)
	aghi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	brasl	%r14,preempt_schedule_irq
	j	.Lio_return
#else
	j	.Lio_restore
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
.Lio_work_user:
	lg	%r1,__LC_KERNEL_STACK
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1

#
# One of the work bits is on. Find out which one.
#
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jo	.Lio_reschedule
#ifdef CONFIG_LIVEPATCH
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lio_patch_pending
#endif
	TSTMSK	__TI_flags(%r12),(_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)
	jnz	.Lio_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lio_notify_resume
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
	jo	.Lio_guarded_storage
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lio_vxrs
	j	.Lio_return

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lio_vxrs:
	larl	%r14,.Lio_return
	jg	load_fpu_regs

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lio_guarded_storage:
	ENABLE_INTS_TRACE
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,gs_load_bc_cb
	DISABLE_INTS_TRACE
	j	.Lio_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lio_reschedule:
	ENABLE_INTS_TRACE
	brasl	%r14,schedule		# call scheduler
	DISABLE_INTS_TRACE
	j	.Lio_return

#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lio_patch_pending:
	lg	%r2,__LC_CURRENT	# pass pointer to task struct
	larl	%r14,.Lio_return
	jg	klp_update_patch_state
#endif

#
# _TIF_SIGPENDING or _TIF_NOTIFY_SIGNAL is set, call do_signal
#
.Lio_sigpending:
	ENABLE_INTS_TRACE
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	DISABLE_INTS_TRACE
	j	.Lio_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lio_notify_resume:
	ENABLE_INTS_TRACE
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_notify_resume
	DISABLE_INTS_TRACE
	j	.Lio_return
ENDPROC(io_int_handler)

/*
 * External interrupt handler routine
 */
ENTRY(ext_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,__LC_EXT_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER,__LC_INT_CLOCK
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	tm	__PT_PSW+1(%r11),0x01	# coming from user space?
	jno	.Lext_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
.Lext_skip_asce:
	lghi	%r1,__LC_EXT_PARAMS2
	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	TRACE_IRQS_OFF
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,EXT_INTERRUPT
	brasl	%r14,do_IRQ
	j	.Lio_return
ENDPROC(ext_int_handler)

/*
 * Load idle PSW.
 */
ENTRY(psw_idle)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,.Lpsw_idle_exit
	stg	%r1,__SF_EMPTY+8(%r15)
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
.Lpsw_idle_stcctm:
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	STCK	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
	lpswe	__SF_EMPTY(%r15)
.Lpsw_idle_exit:
	BR_EX	%r14
ENDPROC(psw_idle)

/*
 * Store floating-point controls and floating-point or vector registers,
 * depending on whether the vector facility is available. A critical
 * section cleanup assures that the registers are stored even if
 * interrupted for some other work. The CIF_FPU flag is set to trigger a
 * lazy restore of the register contents at return from io or a system
 * call.
 */
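/*
 * C-level view (a sketch; the function takes no arguments and returns
 * nothing):
 *	void save_fpu_regs(void);
 */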
ENTRY(save_fpu_regs)
	stnsm	__SF_EMPTY(%r15),0xfc
	lg	%r2,__LC_CURRENT
	aghi	%r2,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsave_fpu_regs_exit
	stfpc	__THREAD_FPU_fpc(%r2)
	lg	%r3,__THREAD_FPU_regs(%r2)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jz	.Lsave_fpu_regs_fp	  # no -> store FP regs
	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
	j	.Lsave_fpu_regs_done	  # -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
	std	0,0(%r3)
	std	1,8(%r3)
	std	2,16(%r3)
	std	3,24(%r3)
	std	4,32(%r3)
	std	5,40(%r3)
	std	6,48(%r3)
	std	7,56(%r3)
	std	8,64(%r3)
	std	9,72(%r3)
	std	10,80(%r3)
	std	11,88(%r3)
	std	12,96(%r3)
	std	13,104(%r3)
	std	14,112(%r3)
	std	15,120(%r3)
.Lsave_fpu_regs_done:
	oi	__LC_CPU_FLAGS+7,_CIF_FPU
.Lsave_fpu_regs_exit:
	ssm	__SF_EMPTY(%r15)
	BR_EX	%r14
.Lsave_fpu_regs_end:
ENDPROC(save_fpu_regs)
EXPORT_SYMBOL(save_fpu_regs)

/*
 * Load floating-point controls and floating-point or vector registers.
 * A critical section cleanup assures that the register contents are
 * loaded even if interrupted for some other work.
 *
 * There are special calling conventions to fit into sysc and io return work:
 *	%r15:	<kernel stack>
 * The function requires:
 *	%r4	(clobbered as a scratch register)
 */
load_fpu_regs:
	stnsm	__SF_EMPTY(%r15),0xfc
	lg	%r4,__LC_CURRENT
	aghi	%r4,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jno	.Lload_fpu_regs_exit
	lfpc	__THREAD_FPU_fpc(%r4)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
	jz	.Lload_fpu_regs_fp		# -> no VX, load FP regs
	VLM	%v0,%v15,0,%r4
	VLM	%v16,%v31,256,%r4
	j	.Lload_fpu_regs_done
.Lload_fpu_regs_fp:
	ld	0,0(%r4)
	ld	1,8(%r4)
	ld	2,16(%r4)
	ld	3,24(%r4)
	ld	4,32(%r4)
	ld	5,40(%r4)
	ld	6,48(%r4)
	ld	7,56(%r4)
	ld	8,64(%r4)
	ld	9,72(%r4)
	ld	10,80(%r4)
	ld	11,88(%r4)
	ld	12,96(%r4)
	ld	13,104(%r4)
	ld	14,112(%r4)
	ld	15,120(%r4)
.Lload_fpu_regs_done:
	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
.Lload_fpu_regs_exit:
	ssm	__SF_EMPTY(%r15)
	BR_EX	%r14
.Lload_fpu_regs_end:
ENDPROC(load_fpu_regs)

/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
	STCK	__LC_MCCK_CLOCK
	BPOFF
	la	%r1,4095		# validate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
	sckc	__LC_CLOCK_COMPARATOR			# validate comparator
	lam	%a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	la	%r14,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
	ptlb
	lg	%r11,__LC_MCESAD-4095(%r14) # extended machine check save area
	nill	%r11,0xfc00		# MCESA_ORIGIN_MASK
	TSTMSK	__LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
	jno	0f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_GS_VALID
	jno	0f
	.insn	 rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
0:	l	%r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_FC_VALID
	jo	0f
	sr	%r14,%r14
0:	sfpc	%r14
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jo	0f
	lghi	%r14,__LC_FPREGS_SAVE_AREA
	ld	%f0,0(%r14)
	ld	%f1,8(%r14)
	ld	%f2,16(%r14)
	ld	%f3,24(%r14)
	ld	%f4,32(%r14)
	ld	%f5,40(%r14)
	ld	%f6,48(%r14)
	ld	%f7,56(%r14)
	ld	%f8,64(%r14)
	ld	%f9,72(%r14)
	ld	%f10,80(%r14)
	ld	%f11,88(%r14)
	ld	%f12,96(%r14)
	ld	%f13,104(%r14)
	ld	%f14,112(%r14)
	ld	%f15,120(%r14)
	j	1f
0:	VLM	%v0,%v15,0,%r11
	VLM	%v16,%v31,256,%r11
1:	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	jl	0f
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	4f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
4:	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER,__LC_MCCK_CLOCK
.Lmcck_skip:
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	la	%r14,4095
	mvc	__PT_CR1(8,%r11),__LC_CREGS_SAVE_AREA-4095+8(%r14)
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	cghi	%r2,0
	je	.Lmcck_return
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	TRACE_IRQS_OFF
	brasl	%r14,s390_handle_mcck
	TRACE_IRQS_ON
.Lmcck_return:
	lctlg	%c1,%c1,__PT_CR1(%r11)
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
0:	lmg	%r11,%r15,__PT_R11(%r11)
	b	__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	lg	%r15,__LC_NODAT_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	j	.Lmcck_skip
ENDPROC(mcck_int_handler)

#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
	stg	%r15,__LC_SAVE_AREA_RESTART
	lg	%r15,__LC_RESTART_STACK
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lg	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
ENDPROC(restart_int_handler)

	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
ENTRY(stack_overflow)
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
ENDPROC(stack_overflow)
#endif

#if IS_ENABLED(CONFIG_KVM)
.Lcleanup_sie:
	cghi	%r11,__LC_SAVE_AREA_ASYNC	# Is this a normal interrupt?
	je	1f
	larl	%r13,.Lsie_entry
	slgr	%r9,%r13
	larl	%r13,.Lsie_skip
	clgr	%r9,%r13
	jh	1f
	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
1:	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	larl	%r9,sie_exit			# skip forward to sie_exit
	BR_EX	%r14,%r11

#endif
	.section .rodata, "a"
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
	.globl	sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL
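
# Note: each SYSCALL(esame,emu) entry in asm/syscall_table.h expands
# here to the address of the 64-bit handler (__s390x_*); with
# CONFIG_COMPAT the same table is included once more below to build
# sys_call_table_emu from the 31-bit compat entry points (__s390_*).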

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.quad __s390_ ## emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif
