xref: /linux/arch/s390/kernel/entry.S (revision c0e297dc61f8d4453e07afbea1fa8d0e67cd4a34)
/*
 *    S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>

__PT_R0      =	__PT_GPRS
__PT_R1      =	__PT_GPRS + 8
__PT_R2      =	__PT_GPRS + 16
__PT_R3      =	__PT_GPRS + 24
__PT_R4      =	__PT_GPRS + 32
__PT_R5      =	__PT_GPRS + 40
__PT_R6      =	__PT_GPRS + 48
__PT_R7      =	__PT_GPRS + 56
__PT_R8      =	__PT_GPRS + 64
__PT_R9      =	__PT_GPRS + 72
__PT_R10     =	__PT_GPRS + 80
__PT_R11     =	__PT_GPRS + 88
__PT_R12     =	__PT_GPRS + 96
__PT_R13     =	__PT_GPRS + 104
__PT_R14     =	__PT_GPRS + 112
__PT_R15     =	__PT_GPRS + 120

STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
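# Illustrative arithmetic, not taken from the original source: with 4 KB
# pages (PAGE_SHIFT = 12) and a THREAD_ORDER of 2 this gives 16 KB kernel
# stacks; STACK_INIT is then the offset of the initial stack pointer within
# such a stack, i.e. the top minus one register save area
# (STACK_FRAME_OVERHEAD) and one pt_regs area (__PT_SIZE).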

_TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		   _TIF_UPROBE)
_TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		   _TIF_SYSCALL_TRACEPOINT)
_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE)
_PIF_WORK	= (_PIF_PER_TRAP)

#define BASED(name) name-system_call(%r13)

	.macro	TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_on_caller
#endif
	.endm

	.macro	TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_off_caller
#endif
	.endm

	.macro	LOCKDEP_SYS_EXIT
#ifdef CONFIG_LOCKDEP
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jz	.+10
	brasl	%r14,lockdep_sys_exit
#endif
	.endm

	.macro LPP newpp
#if IS_ENABLED(CONFIG_KVM)
	tm	__LC_MACHINE_FLAGS+6,0x20	# MACHINE_FLAG_LPP
	jz	.+8
	.insn	s,0xb2800000,\newpp
#endif
	.endm
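# Note on the macro above: the .insn statement emits the LPP (load program
# parameter) instruction by its opcode, presumably so the file also builds
# with assemblers that lack the mnemonic; the tm/jz pair skips it when the
# MACHINE_FLAG_LPP facility bit is not set.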

	.macro	HANDLE_SIE_INTERCEPT scratch,reason
#if IS_ENABLED(CONFIG_KVM)
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	.+62
	lgr	\scratch,%r9
	slg	\scratch,BASED(.Lsie_critical)
	clg	\scratch,BASED(.Lsie_critical_length)
	.if	\reason==1
	# Some program interrupts are suppressing (e.g. protection).
	# We must also check the instruction after SIE in that case.
	# do_protection_exception will rewind to .Lrewind_pad
	jh	.+42
	.else
	jhe	.+42
	.endif
	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
	LPP	__SF_EMPTY+16(%r15)		# set host id
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	mvi	__SF_EMPTY+31(%r15),\reason	# set exit reason
#endif
	.endm

	.macro	CHECK_STACK stacksize,savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,\stacksize - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm
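# Rough C-style sketch of the test above (an assumption about intent, the
# macro itself is authoritative): with a power-of-two stacksize and a guard
# area of CONFIG_STACK_GUARD bytes at the low end,
#
#	if ((%r15 & (stacksize - CONFIG_STACK_GUARD)) == 0)
#		goto stack_overflow;	/* stack pointer inside the guard area */
#
# %r14 is preloaded with the save area address for use by stack_overflow.
# tml only examines the low halfword of %r15, which is enough for the
# stack sizes used here.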

	.macro	SWITCH_ASYNC savearea,stack,shift
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	1f
	lgr	%r14,%r9
	slg	%r14,BASED(.Lcritical_start)
	clg	%r14,BASED(.Lcritical_length)
	jhe	0f
	lghi	%r11,\savearea		# inside critical section, do cleanup
	brasl	%r14,cleanup_critical
	tmhh	%r8,0x0001		# retest problem state after cleanup
	jnz	1f
0:	lg	%r14,\stack		# are we already on the target stack?
	slgr	%r14,%r15
	srag	%r14,%r14,\shift
	jnz	1f
	CHECK_STACK 1<<\shift,\savearea
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	lg	%r15,\stack		# load target stack
2:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	.endm
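# In outline (illustrative pseudo code, not meant to be assembled):
#
#	if (!interrupted_from_user_mode) {
#		if (%r9 lies between .L__critical_start and .L__critical_end)
#			cleanup_critical();	/* finish/back out the entry or exit code */
#		if ((stack - %r15) >> shift == 0) {	/* already on the target stack */
#			CHECK_STACK; make room for a pt_regs; goto 2f;
#		}
#	}
#	1: load the target stack into %r15;  2: point %r11 at the pt_regs area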

	.macro UPDATE_VTIME scratch,enter_timer
	lg	\scratch,__LC_EXIT_TIMER
	slg	\scratch,\enter_timer
	alg	\scratch,__LC_USER_TIMER
	stg	\scratch,__LC_USER_TIMER
	lg	\scratch,__LC_LAST_UPDATE_TIMER
	slg	\scratch,__LC_EXIT_TIMER
	alg	\scratch,__LC_SYSTEM_TIMER
	stg	\scratch,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
	.endm
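# Roughly equivalent C (a sketch using the lowcore fields by name; the CPU
# timer counts down, hence the order of the subtractions):
#
#	user_timer   += exit_timer - enter_timer;
#	system_timer += last_update_timer - exit_timer;
#	last_update_timer = enter_timer;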

	.macro	LAST_BREAK scratch
	srag	\scratch,%r10,23
	jz	.+10
	stg	%r10,__TI_last_break(%r12)
	.endm

	.macro REENABLE_IRQS
	stg	%r8,__LC_RETURN_PSW
	ni	__LC_RETURN_PSW,0xbf
	ssm	__LC_RETURN_PSW
	.endm
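# The macro above copies the interrupted PSW mask from %r8 to the lowcore,
# clears the PER bit (0x40 in the first byte) and reloads the mask with
# ssm, so interrupts are enabled again exactly if the interrupted context
# had them enabled, but without PER tracing.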

	.macro STCK savearea
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
	.insn	s,0xb27c0000,\savearea		# store clock fast
#else
	.insn	s,0xb2050000,\savearea		# store clock
#endif
	.endm

	.section .kprobes.text, "ax"

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
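/*
 * Roughly, in C-like pseudo code (illustrative, field names as used below):
 *
 *	prev->thread.ksp = current stack pointer;
 *	lowcore.current      = next;
 *	lowcore.thread_info  = thread_info(next);
 *	lowcore.kernel_stack = thread_info(next) + STACK_INIT;
 *	stack pointer = next->thread.ksp;
 *	cr4 / lowcore.current_pid = next->pid;
 *
 * The callee-saved gprs of prev are saved to, and those of next restored
 * from, the stack frames the two tasks went to sleep in.
 */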
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lgr	%r1,%r2
	aghi	%r1,__TASK_thread		# thread_struct of prev task
	lg	%r4,__TASK_thread_info(%r2)	# get thread_info of prev
	lg	%r5,__TASK_thread_info(%r3)	# get thread_info of next
	stg	%r15,__THREAD_ksp(%r1)		# store kernel stack of prev
	lgr	%r1,%r3
	aghi	%r1,__TASK_thread		# thread_struct of next task
	lgr	%r15,%r5
	aghi	%r15,STACK_INIT			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r5,__LC_THREAD_INFO		# store thread info of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1)		# load kernel stack of next
	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
	mvc	__LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	br	%r14

.L__critical_start:
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */
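/*
 * Rough flow, as a reading aid (the code below is authoritative): save the
 * user registers and PSW into a pt_regs on the kernel stack, update the
 * CPU time accounting, look up the handler in the system call table (svc 0
 * takes the number from %r1), call the tracing hooks if requested, call the
 * handler, then fall through to .Lsysc_return, which checks for pending
 * work (signals, reschedule, machine checks, ...) before lpswe'ing back.
 */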

ENTRY(system_call)
	stpt	__LC_SYNC_ENTER_TIMER
.Lsysc_stmg:
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_THREAD_INFO
	lghi	%r14,_PIF_SYSCALL
.Lsysc_per:
	lg	%r15,__LC_KERNEL_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
.Lsysc_vtime:
	UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
	LAST_BREAK %r13
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
	stg	%r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
	llgh	%r8,__PT_INT_CODE+2(%r11)
	slag	%r8,%r8,2			# shift and test for svc 0
	jnz	.Lsysc_nr_ok
	# svc 0: system call number in %r1
	llgfr	%r1,%r1				# clear high word in r1
	cghi	%r1,NR_syscalls
	jnl	.Lsysc_nr_ok
	sth	%r1,__PT_INT_CODE+2(%r11)
	slag	%r8,%r1,2
.Lsysc_nr_ok:
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stg	%r2,__PT_ORIG_GPR2(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lgf	%r9,0(%r8,%r10)			# get system call add.
	tm	__TI_flags+7(%r12),_TIF_TRACE
	jnz	.Lsysc_tracesys
	basr	%r14,%r9			# call sys_xxxx
	stg	%r2,__PT_R2(%r11)		# store return value

.Lsysc_return:
	LOCKDEP_SYS_EXIT
.Lsysc_tif:
	tm	__PT_PSW+1(%r11),0x01		# returning to user ?
	jno	.Lsysc_restore
	tm	__PT_FLAGS+7(%r11),_PIF_WORK
	jnz	.Lsysc_work
	tm	__TI_flags+7(%r12),_TIF_WORK
	jnz	.Lsysc_work			# check for work
	tm	__LC_CPU_FLAGS+7,_CIF_WORK
	jnz	.Lsysc_work
.Lsysc_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_PSW
.Lsysc_done:

#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
	tm	__LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
	jo	.Lsysc_mcck_pending
	tm	__TI_flags+7(%r12),_TIF_NEED_RESCHED
	jo	.Lsysc_reschedule
#ifdef CONFIG_UPROBES
	tm	__TI_flags+7(%r12),_TIF_UPROBE
	jo	.Lsysc_uprobe_notify
#endif
	tm	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
	jo	.Lsysc_singlestep
	tm	__TI_flags+7(%r12),_TIF_SIGPENDING
	jo	.Lsysc_sigpending
	tm	__TI_flags+7(%r12),_TIF_NOTIFY_RESUME
	jo	.Lsysc_notify_resume
	tm	__LC_CPU_FLAGS+7,_CIF_ASCE
	jo	.Lsysc_uaccess
	j	.Lsysc_return		# beware of critical section cleanup

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lsysc_reschedule:
	larl	%r14,.Lsysc_return
	jg	schedule

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lsysc_mcck_pending:
	larl	%r14,.Lsysc_return
	jg	s390_handle_mcck	# TIF bit will be cleared by handler

#
# _CIF_ASCE is set, load user space asce
#
.Lsysc_uaccess:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	j	.Lsysc_return

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lsysc_sigpending:
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	tm	__PT_FLAGS+7(%r11),_PIF_SYSCALL
	jno	.Lsysc_return
	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
	lghi	%r8,0			# svc 0 returns -ENOSYS
	llgh	%r1,__PT_INT_CODE+2(%r11)	# load new svc number
	cghi	%r1,NR_syscalls
	jnl	.Lsysc_nr_ok		# invalid svc number -> do svc 0
	slag	%r8,%r1,2
	j	.Lsysc_nr_ok		# restart svc

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lsysc_notify_resume:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_notify_resume

#
# _TIF_UPROBE is set, call uprobe_notify_resume
#
#ifdef CONFIG_UPROBES
.Lsysc_uprobe_notify:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	uprobe_notify_resume
#endif

#
# _PIF_PER_TRAP is set, call do_per_trap
#
.Lsysc_singlestep:
	ni	__PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_per_trap

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
.Lsysc_tracesys:
	lgr	%r2,%r11		# pass pointer to pt_regs
	la	%r3,0
	llgh	%r0,__PT_INT_CODE+2(%r11)
	stg	%r0,__PT_R2(%r11)
	brasl	%r14,do_syscall_trace_enter
	lghi	%r0,NR_syscalls
	clgr	%r0,%r2
	jnh	.Lsysc_tracenogo
	sllg	%r8,%r2,2
	lgf	%r9,0(%r8,%r10)
.Lsysc_tracego:
	lmg	%r3,%r7,__PT_R3(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lg	%r2,__PT_ORIG_GPR2(%r11)
	basr	%r14,%r9		# call sys_xxx
	stg	%r2,__PT_R2(%r11)	# store return value
.Lsysc_tracenogo:
	tm	__TI_flags+7(%r12),_TIF_TRACE
	jz	.Lsysc_return
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_syscall_trace_exit

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	lg	%r12,__LC_THREAD_INFO
	brasl	%r14,schedule_tail
	TRACE_IRQS_ON
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
	jne	.Lsysc_tracenogo
	# it's a kernel thread
	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
ENTRY(kernel_thread_starter)
	la	%r2,0(%r10)
	basr	%r14,%r9
	j	.Lsysc_tracenogo

/*
 * Program check handler routine
 */
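/*
 * In outline (a reading aid, the code below is authoritative): faults from
 * user space are handled on the kernel stack like a system call, faults
 * from kernel space stay on the interrupted stack after a stack overflow
 * check.  PER events are either handed to do_per_trap (kernel, i.e.
 * kprobes) or flagged with _PIF_PER_TRAP (user), and a PER event on a
 * single-stepped svc is redirected to .Lpgm_svcper.  Everything else is
 * dispatched through pgm_check_table.
 */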

ENTRY(pgm_check_handler)
	stpt	__LC_SYNC_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_THREAD_INFO
	larl	%r13,system_call
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	HANDLE_SIE_INTERCEPT %r14,1
	tmhh	%r8,0x0001		# test problem state bit
	jnz	1f			# -> fault in user space
	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	0f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
0:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
	LAST_BREAK %r14
	lg	%r15,__LC_KERNEL_STACK
	lg	%r14,__TI_task(%r12)
	aghi	%r14,__TASK_thread	# pointer to thread_struct
	lghi	%r13,__LC_PGM_TDB
	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
	jz	2f
	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
2:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
	mvc	__PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	stg	%r10,__PT_ARGS(%r11)
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jz	0f
	tmhh	%r8,0x0001		# kernel per event ?
	jz	.Lpgm_kprobe
	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
0:	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	larl	%r1,pgm_check_table
	llgh	%r10,__PT_INT_CODE+2(%r11)
	nill	%r10,0x007f
	sll	%r10,2
	je	.Lsysc_return
	lgf	%r1,0(%r10,%r1)		# load address of handler routine
	lgr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# branch to interrupt-handler
	j	.Lsysc_return

#
# PER event in supervisor state, must be kprobes
#
.Lpgm_kprobe:
	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_per_trap
	j	.Lsysc_return

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per and enable irqs

/*
 * IO interrupt handler routine
 */
ENTRY(io_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_THREAD_INFO
	larl	%r13,system_call
	lmg	%r8,%r9,__LC_IO_OLD_PSW
	HANDLE_SIE_INTERCEPT %r14,2
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
	tmhh	%r8,0x0001		# interrupting from user?
	jz	.Lio_skip
	UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
	LAST_BREAK %r14
.Lio_skip:
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,IO_INTERRUPT
	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
	jz	.Lio_call
	lghi	%r3,THIN_INTERRUPT
.Lio_call:
	brasl	%r14,do_IRQ
	tm	__LC_MACHINE_FLAGS+6,0x10	# MACHINE_FLAG_LPAR
	jz	.Lio_return
	tpi	0
	jz	.Lio_return
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	j	.Lio_loop
.Lio_return:
	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON
.Lio_tif:
	tm	__TI_flags+7(%r12),_TIF_WORK
	jnz	.Lio_work		# there is work to do (signals etc.)
	tm	__LC_CPU_FLAGS+7,_CIF_WORK
	jnz	.Lio_work
.Lio_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_PSW
.Lio_done:

#
# There is work to do; find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
# 2) if we return to kernel code and kvm is enabled check if we need to
#    modify the psw to leave SIE
# 3) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
.Lio_work:
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jo	.Lio_work_user		# yes -> do resched & signal
#ifdef CONFIG_PREEMPT
	# check for preemptive scheduling
	icm	%r0,15,__TI_precount(%r12)
	jnz	.Lio_restore		# preemption is disabled
	tm	__TI_flags+7(%r12),_TIF_NEED_RESCHED
	jno	.Lio_restore
	# switch to kernel stack
	lg	%r1,__PT_R15(%r11)
	aghi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	# TRACE_IRQS_ON already done at .Lio_return, call
	# TRACE_IRQS_OFF to keep things symmetrical
	TRACE_IRQS_OFF
	brasl	%r14,preempt_schedule_irq
	j	.Lio_return
#else
	j	.Lio_restore
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
.Lio_work_user:
	lg	%r1,__LC_KERNEL_STACK
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1

#
# One of the work bits is on. Find out which one.
#
.Lio_work_tif:
	tm	__LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
	jo	.Lio_mcck_pending
	tm	__TI_flags+7(%r12),_TIF_NEED_RESCHED
	jo	.Lio_reschedule
	tm	__TI_flags+7(%r12),_TIF_SIGPENDING
	jo	.Lio_sigpending
	tm	__TI_flags+7(%r12),_TIF_NOTIFY_RESUME
	jo	.Lio_notify_resume
	tm	__LC_CPU_FLAGS+7,_CIF_ASCE
	jo	.Lio_uaccess
	j	.Lio_return		# beware of critical section cleanup

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lio_mcck_pending:
	# TRACE_IRQS_ON already done at .Lio_return
	brasl	%r14,s390_handle_mcck	# TIF bit will be cleared by handler
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _CIF_ASCE is set, load user space asce
#
.Lio_uaccess:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	j	.Lio_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lio_reschedule:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	brasl	%r14,schedule		# call scheduler
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lio_sigpending:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lio_notify_resume:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_notify_resume
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

/*
 * External interrupt handler routine
 */
ENTRY(ext_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_THREAD_INFO
	larl	%r13,system_call
	lmg	%r8,%r9,__LC_EXT_OLD_PSW
	HANDLE_SIE_INTERCEPT %r14,3
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
	tmhh	%r8,0x0001		# interrupting from user ?
	jz	.Lext_skip
	UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
	LAST_BREAK %r14
.Lext_skip:
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	lghi	%r1,__LC_EXT_PARAMS2
	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,EXT_INTERRUPT
	brasl	%r14,do_IRQ
	j	.Lio_return

/*
 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
 */
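/*
 * As far as can be told from the code below, %r2 points to the per-cpu
 * idle data (used for the clock/timer stamps) and %r3 holds the PSW mask
 * to be loaded; the address half of the idle PSW is built on the stack.
 * If an interrupt hits near the lpswe, .Lcleanup_idle redoes this
 * bookkeeping, hence the "second half" above.
 */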
ENTRY(psw_idle)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,.Lpsw_idle_lpsw+4
	stg	%r1,__SF_EMPTY+8(%r15)
	STCK	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
	lpswe	__SF_EMPTY(%r15)
	br	%r14
.Lpsw_idle_end:

.L__critical_end:

/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
	STCK	__LC_MCCK_CLOCK
	la	%r1,4095		# revalidate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_THREAD_INFO
	larl	%r13,system_call
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	HANDLE_SIE_INTERCEPT %r14,4
	tm	__LC_MCCK_CODE,0x80	# system damage?
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
	jo	3f
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	jl	0f
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
	jno	.Lmcck_panic		# no -> skip cleanup critical
	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT
	tm	%r8,0x0001		# interrupting from user ?
	jz	.Lmcck_skip
	UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
	LAST_BREAK %r14
.Lmcck_skip:
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lmcck_return
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
	tm	__LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
	jno	.Lmcck_return
	TRACE_IRQS_OFF
	brasl	%r14,s390_handle_mcck
	TRACE_IRQS_ON
.Lmcck_return:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0:	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_MCCK_PSW

.Lmcck_panic:
	lg	%r14,__LC_PANIC_STACK
	slgr	%r14,%r15
	srag	%r14,%r14,PAGE_SHIFT
	jz	0f
	lg	%r15,__LC_PANIC_STACK
0:	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	.Lmcck_skip

#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
	stg	%r15,__LC_SAVE_AREA_RESTART
	lg	%r15,__LC_RESTART_STACK
	aghi	%r15,-__PT_SIZE			# create pt_regs on stack
	xc	0(__PT_SIZE,%r15),0(%r15)
	stmg	%r0,%r14,__PT_R0(%r15)
	mvc	__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	__PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
	aghi	%r15,-STACK_FRAME_OVERHEAD	# create stack frame on stack
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lg	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b

	.section .kprobes.text, "ax"

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
	lg	%r15,__LC_PANIC_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
#endif

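#
# cleanup_critical is called by SWITCH_ASYNC when an interrupt arrives
# while the old PSW address (%r9) lies inside the .L__critical_start /
# .L__critical_end range.  The table below brackets the individual critical
# regions; the .Lcleanup_* routines either complete or roll back the
# interrupted entry/exit sequence and return an adjusted continuation
# address in %r9.
#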
	.align	8
.Lcleanup_table:
	.quad	system_call
	.quad	.Lsysc_do_svc
	.quad	.Lsysc_tif
	.quad	.Lsysc_restore
	.quad	.Lsysc_done
	.quad	.Lio_tif
	.quad	.Lio_restore
	.quad	.Lio_done
	.quad	psw_idle
	.quad	.Lpsw_idle_end

cleanup_critical:
	clg	%r9,BASED(.Lcleanup_table)	# system_call
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_do_svc
	jl	.Lcleanup_system_call
	clg	%r9,BASED(.Lcleanup_table+16)	# .Lsysc_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+24)	# .Lsysc_restore
	jl	.Lcleanup_sysc_tif
	clg	%r9,BASED(.Lcleanup_table+32)	# .Lsysc_done
	jl	.Lcleanup_sysc_restore
	clg	%r9,BASED(.Lcleanup_table+40)	# .Lio_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+48)	# .Lio_restore
	jl	.Lcleanup_io_tif
	clg	%r9,BASED(.Lcleanup_table+56)	# .Lio_done
	jl	.Lcleanup_io_restore
	clg	%r9,BASED(.Lcleanup_table+64)	# psw_idle
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+72)	# .Lpsw_idle_end
	jl	.Lcleanup_idle
0:	br	%r14


.Lcleanup_system_call:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn)
	jh	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	# check if stmg has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn+8)
	jh	0f
	mvc	__LC_SAVE_AREA_SYNC(64),0(%r11)
0:	# check if base register setup + TIF bit load has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+16)
	jhe	0f
	# set up saved registers r10 and r12
	stg	%r10,16(%r11)		# r10 last break
	stg	%r12,32(%r11)		# r12 thread-info pointer
0:	# check if the user time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+24)
	jh	0f
	lg	%r15,__LC_EXIT_TIMER
	slg	%r15,__LC_SYNC_ENTER_TIMER
	alg	%r15,__LC_USER_TIMER
	stg	%r15,__LC_USER_TIMER
0:	# check if the system time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+32)
	jh	0f
	lg	%r15,__LC_LAST_UPDATE_TIMER
	slg	%r15,__LC_EXIT_TIMER
	alg	%r15,__LC_SYSTEM_TIMER
	stg	%r15,__LC_SYSTEM_TIMER
0:	# update accounting time stamp
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	# do LAST_BREAK
	lg	%r9,16(%r11)
	srag	%r9,%r9,23
	jz	0f
	mvc	__TI_last_break(8,%r12),16(%r11)
0:	# set up saved register r11
	lg	%r15,__LC_KERNEL_STACK
	la	%r9,STACK_FRAME_OVERHEAD(%r15)
	stg	%r9,24(%r11)		# r11 pt_regs pointer
	# fill pt_regs
	mvc	__PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
	stmg	%r0,%r7,__PT_R0(%r9)
	mvc	__PT_PSW(16,%r9),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r9),__LC_SVC_ILC
	xc	__PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
	mvi	__PT_FLAGS+7(%r9),_PIF_SYSCALL
	# setup saved register r15
	stg	%r15,56(%r11)		# r15 stack pointer
	# set new psw address and exit
	larl	%r9,.Lsysc_do_svc
	br	%r14
.Lcleanup_system_call_insn:
	.quad	system_call
	.quad	.Lsysc_stmg
	.quad	.Lsysc_per
	.quad	.Lsysc_vtime+18
	.quad	.Lsysc_vtime+42

.Lcleanup_sysc_tif:
	larl	%r9,.Lsysc_tif
	br	%r14

.Lcleanup_sysc_restore:
	clg	%r9,BASED(.Lcleanup_sysc_restore_insn)
	je	0f
	lg	%r9,24(%r11)		# get saved pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
0:	lmg	%r8,%r9,__LC_RETURN_PSW
	br	%r14
.Lcleanup_sysc_restore_insn:
	.quad	.Lsysc_done - 4

.Lcleanup_io_tif:
	larl	%r9,.Lio_tif
	br	%r14

.Lcleanup_io_restore:
	clg	%r9,BASED(.Lcleanup_io_restore_insn)
	je	0f
	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
0:	lmg	%r8,%r9,__LC_RETURN_PSW
	br	%r14
.Lcleanup_io_restore_insn:
	.quad	.Lio_done - 4

.Lcleanup_idle:
	# copy interrupt clock & cpu timer
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0:	# check if stck & stpt have been executed
	clg	%r9,BASED(.Lcleanup_idle_insn)
	jhe	1f
	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
1:	# account system time going idle
	lg	%r9,__LC_STEAL_TIMER
	alg	%r9,__CLOCK_IDLE_ENTER(%r2)
	slg	%r9,__LC_LAST_UPDATE_CLOCK
	stg	%r9,__LC_STEAL_TIMER
	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
	lg	%r9,__LC_SYSTEM_TIMER
	alg	%r9,__LC_LAST_UPDATE_TIMER
	slg	%r9,__TIMER_IDLE_ENTER(%r2)
	stg	%r9,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
	# prepare return psw
	nihh	%r8,0xfcfd		# clear irq & wait state bits
	lg	%r9,48(%r11)		# return from psw_idle
	br	%r14
.Lcleanup_idle_insn:
	.quad	.Lpsw_idle_lpsw

/*
 * Integer constants
 */
	.align	8
.Lcritical_start:
	.quad	.L__critical_start
.Lcritical_length:
	.quad	.L__critical_end - .L__critical_start


#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
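/*
 * Roughly, as seen from C (a sketch; the exact prototype lives in the KVM
 * headers):
 *
 *	int sie64a(struct kvm_s390_sie_block *sie_block, u64 *guest_gprs);
 *
 * Guest gprs 0-13 are loaded from and stored back to guest_gprs around the
 * sie instruction, gprs 14 and 15 stay host owned.  The return value is
 * the exit reason code kept at __SF_EMPTY+24 (zero by default, -EFAULT on
 * a fault).
 */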
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
	xc	__SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_done
	LPP	__SF_EMPTY(%r15)		# set guest id
	sie	0(%r14)
.Lsie_done:
	LPP	__SF_EMPTY+16(%r15)		# set host id
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
# instructions between sie64a and .Lsie_done should not cause program
# interrupts. So let's use a nop (47 00 00 00) as a landing pad.
# See also HANDLE_SIE_INTERCEPT
.Lrewind_pad:
	nop	0
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_EMPTY+24(%r15)		# return exit reason code
	br	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_EMPTY+24(%r15)	# set exit reason code
	j	sie_exit

	.align	8
.Lsie_critical:
	.quad	.Lsie_gmap
.Lsie_critical_length:
	.quad	.Lsie_done - .Lsie_gmap

	EX_TABLE(.Lrewind_pad,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
#endif

	.section .rodata, "a"
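# The tables are generated from syscalls.S, where each entry is a
# SYSCALL(native,compat) pair; the SYSCALL macro below picks the native
# column here and, further down, the compat column for sys_call_table_emu.
# Illustrative expansion of one such (hypothetical) entry:
#
#	SYSCALL(sys_read,compat_sys_read)  ->  .long sys_read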
#define SYSCALL(esame,emu)	.long esame
	.globl	sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.long emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "syscalls.S"
#undef SYSCALL
#endif
