/* SPDX-License-Identifier: GPL-2.0 */
/*
 *    S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/ctl_reg.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>

__PT_R0      =	__PT_GPRS
__PT_R1      =	__PT_GPRS + 8
__PT_R2      =	__PT_GPRS + 16
__PT_R3      =	__PT_GPRS + 24
__PT_R4      =	__PT_GPRS + 32
__PT_R5      =	__PT_GPRS + 40
__PT_R6      =	__PT_GPRS + 48
__PT_R7      =	__PT_GPRS + 56
__PT_R8      =	__PT_GPRS + 64
__PT_R9      =	__PT_GPRS + 72
__PT_R10     =	__PT_GPRS + 80
__PT_R11     =	__PT_GPRS + 88
__PT_R12     =	__PT_GPRS + 96
__PT_R13     =	__PT_GPRS + 104
__PT_R14     =	__PT_GPRS + 112
__PT_R15     =	__PT_GPRS + 120

STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

_TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		   _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING)
_TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		   _TIF_SYSCALL_TRACEPOINT)
_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
		   _CIF_ASCE_SECONDARY | _CIF_FPU)
_PIF_WORK	= (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)

_LPP_OFFSET	= __LC_LPP

#define BASED(name) name-cleanup_critical(%r13)

	.macro	TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_on_caller
#endif
	.endm

	.macro	TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_off_caller
#endif
	.endm

	.macro	LOCKDEP_SYS_EXIT
#ifdef CONFIG_LOCKDEP
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jz	.+10
	brasl	%r14,lockdep_sys_exit
#endif
	.endm

	.macro	CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

	.macro	CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - STACK_SIZE
	oill	%r14,STACK_INIT
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm
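
	/*
	 * Illustration, assuming the usual s390 values PAGE_SHIFT = 12 and
	 * THREAD_SIZE_ORDER = 2 (16KB kernel stacks): STACK_INIT is then the
	 * offset of the topmost usable frame, i.e. STACK_SIZE minus one
	 * register save area (STACK_FRAME_OVERHEAD) and one struct pt_regs.
	 * CHECK_VMAP_STACK relies on exactly this layout: rounding %r15 down
	 * to a STACK_SIZE boundary and OR-ing in STACK_INIT reconstructs the
	 * initial stack pointer of whatever stack %r15 currently points
	 * into, which must then match one of the four known kernel stacks.
	 */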

	.macro	SWITCH_ASYNC savearea,timer
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	2f
	lgr	%r14,%r9
	cghi	%r14,__LC_RETURN_LPSWE
	je	0f
	slg	%r14,BASED(.Lcritical_start)
	clg	%r14,BASED(.Lcritical_length)
	jhe	1f
0:
	lghi	%r11,\savearea		# inside critical section, do cleanup
	brasl	%r14,cleanup_critical
	tmhh	%r8,0x0001		# retest problem state after cleanup
	jnz	2f
1:	lg	%r14,__LC_ASYNC_STACK	# are we already on the target stack?
	slgr	%r14,%r15
	srag	%r14,%r14,STACK_SHIFT
	jnz	3f
	CHECK_STACK \savearea
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	4f
2:	UPDATE_VTIME %r14,%r15,\timer
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
3:	lg	%r15,__LC_ASYNC_STACK	# load async stack
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	.endm

	.macro	UPDATE_VTIME w1,w2,enter_timer
	lg	\w1,__LC_EXIT_TIMER
	lg	\w2,__LC_LAST_UPDATE_TIMER
	slg	\w1,\enter_timer
	slg	\w2,__LC_EXIT_TIMER
	alg	\w1,__LC_USER_TIMER
	alg	\w2,__LC_SYSTEM_TIMER
	stg	\w1,__LC_USER_TIMER
	stg	\w2,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
	.endm
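
	/*
	 * A worked example of the UPDATE_VTIME arithmetic (the CPU timer
	 * counts down, so earlier snapshots hold larger values): the timer
	 * is stored in __LC_EXIT_TIMER on each exit to user space and in
	 * \enter_timer on the next kernel entry.  Therefore
	 *	__LC_EXIT_TIMER - \enter_timer = time spent in user space,
	 *	__LC_LAST_UPDATE_TIMER - __LC_EXIT_TIMER = time in the kernel,
	 * which the macro accumulates into __LC_USER_TIMER and
	 * __LC_SYSTEM_TIMER before making \enter_timer the new
	 * __LC_LAST_UPDATE_TIMER.
	 */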

	.macro	REENABLE_IRQS
	stg	%r8,__LC_RETURN_PSW
	ni	__LC_RETURN_PSW,0xbf
	ssm	__LC_RETURN_PSW
	.endm

	.macro	STCK savearea
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
	.insn	s,0xb27c0000,\savearea		# store clock fast
#else
	.insn	s,0xb2050000,\savearea		# store clock
#endif
	.endm

	/*
	 * The TSTMSK macro generates a test-under-mask instruction by
	 * calculating the memory offset for the specified mask value.
	 * Mask value can be any constant.  The macro shifts the mask
	 * value to calculate the memory offset for the test-under-mask
	 * instruction.
	 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
		.if (\bytepos < \size) && (\mask >> 8)
			.if (\mask & 0xff)
				.error "Mask exceeds byte boundary"
			.endif
			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
			.exitm
		.endif
		.ifeq \mask
			.error "Mask must not be zero"
		.endif
		off = \size - \bytepos - 1
		tm	off+\addr, \mask
	.endm
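
	/*
	 * Example expansion: for a mask that fits into the least
	 * significant byte of an 8-byte field - as the single-byte
	 * "oi/ni __LC_CPU_FLAGS+7,..." updates elsewhere in this file
	 * show is the case for _CIF_FPU - an invocation such as
	 *	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	 * does not recurse and emits the single instruction
	 *	tm	7+__LC_CPU_FLAGS,_CIF_FPU
	 * i.e. a test-under-mask on the last byte of the flags field.
	 */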

	.macro BPOFF
	ALTERNATIVE "", ".long 0xb2e8c000", 82
	.endm

	.macro BPON
	ALTERNATIVE "", ".long 0xb2e8d000", 82
	.endm

	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
		    "", 82
	.endm

	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8; .long 0xb2e8c000", \
		    "jnz .+8; .long 0xb2e8d000", 82
	.endm

	GEN_BR_THUNK %r9
	GEN_BR_THUNK %r14
	GEN_BR_THUNK %r14,%r11

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * This nop exists only to avoid having __switch_to start at the
	 * beginning of the kprobes text section.  Otherwise several symbols
	 * would share the same address and e.g. objdump would pick an
	 * arbitrary one of them when disassembling this code.
	 * With the added nop in between, the __switch_to symbol is unique
	 * again.
	 */
	nop	0

ENTRY(__bpon)
	.globl __bpon
	BPON
	BR_EX	%r14
ENDPROC(__bpon)

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
	BR_EX	%r14
ENDPROC(__switch_to)

.L__critical_start:

#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL(%r15)	# save control block pointer
	stg	%r3,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU		# load guest fp/vx registers ?
	jno	.Lsie_load_guest_gprs
	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
.Lsie_load_guest_gprs:
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
	sie	0(%r14)
.Lsie_exit:
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(sie64a)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif
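
# Worked example of the rewind pads above: when a program check inside SIE
# is handled, the cleanup code in pgm_check_handler first points the return
# address at sie_exit; a C handler such as do_protection_exception may then
# rewind that PSW by the ILC of the faulting instruction (2, 4 or 6 bytes).
# The three nopr instructions guarantee that every possible rewind target
# (sie_exit minus 2/4/6 bytes, or sie_exit itself) is a valid instruction
# covered by an EX_TABLE entry, so all of them funnel into .Lsie_fault,
# which reports the failure with a -EFAULT reason code.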

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

ENTRY(system_call)
	stpt	__LC_SYNC_ENTER_TIMER
.Lsysc_stmg:
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lg	%r12,__LC_CURRENT
	lghi	%r13,__TASK_thread
	lghi	%r14,_PIF_SYSCALL
.Lsysc_per:
	lg	%r15,__LC_KERNEL_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
.Lsysc_vtime:
	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
	stg	%r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
	# clear user controlled register to prevent speculative use
	xgr	%r0,%r0
	# load address of system call table
	lg	%r10,__THREAD_sysc_table(%r13,%r12)
	llgh	%r8,__PT_INT_CODE+2(%r11)
	slag	%r8,%r8,3			# shift and test for svc 0
	jnz	.Lsysc_nr_ok
	# svc 0: system call number in %r1
	llgfr	%r1,%r1				# clear high word in r1
	cghi	%r1,NR_syscalls
	jnl	.Lsysc_nr_ok
	sth	%r1,__PT_INT_CODE+2(%r11)
	slag	%r8,%r1,3
.Lsysc_nr_ok:
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stg	%r2,__PT_ORIG_GPR2(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lg	%r9,0(%r8,%r10)			# get system call address
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	jnz	.Lsysc_tracesys
	BASR_EX	%r14,%r9			# call sys_xxxx
	stg	%r2,__PT_R2(%r11)		# store return value

.Lsysc_return:
#ifdef CONFIG_DEBUG_RSEQ
	lgr	%r2,%r11
	brasl	%r14,rseq_syscall
#endif
	LOCKDEP_SYS_EXIT
.Lsysc_tif:
	TSTMSK	__PT_FLAGS(%r11),_PIF_WORK
	jnz	.Lsysc_work
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lsysc_work			# check for work
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	jnz	.Lsysc_work
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lsysc_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
.Lsysc_exit_timer:
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
	lmg	%r11,%r15,__PT_R11(%r11)
	b	__LC_RETURN_LPSWE(%r0)
.Lsysc_done:
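
# Note on the exit above: "b __LC_RETURN_LPSWE(%r0)" branches to a lpswe
# instruction kept in lowcore.  After the lmg the registers no longer point
# at pt_regs, so an interrupted return must be recognizable by address
# alone: SWITCH_ASYNC and cleanup_critical compare the interruption address
# with __LC_RETURN_LPSWE and let .Lcleanup_lpswe finish the return.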

#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jo	.Lsysc_mcck_pending
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jo	.Lsysc_reschedule
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
	jo	.Lsysc_syscall_restart
#ifdef CONFIG_UPROBES
	TSTMSK	__TI_flags(%r12),_TIF_UPROBE
	jo	.Lsysc_uprobe_notify
#endif
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
	jo	.Lsysc_guarded_storage
	TSTMSK	__PT_FLAGS(%r11),_PIF_PER_TRAP
	jo	.Lsysc_singlestep
#ifdef CONFIG_LIVEPATCH
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lsysc_patch_pending	# handle live patching just before
					# signals and possible syscall restart
#endif
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
	jo	.Lsysc_syscall_restart
	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
	jo	.Lsysc_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lsysc_notify_resume
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsysc_vxrs
	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
	jnz	.Lsysc_asce
	j	.Lsysc_return		# beware of critical section cleanup

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lsysc_reschedule:
	larl	%r14,.Lsysc_return
	jg	schedule

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lsysc_mcck_pending:
	larl	%r14,.Lsysc_return
	jg	s390_handle_mcck	# TIF bit will be cleared by handler

#
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lsysc_asce:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
	lctlg	%c7,%c7,__LC_VDSO_ASCE		# load secondary asce
	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
	jz	.Lsysc_return
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
	tm	__LC_STFLE_FAC_LIST+3,0x10	# has MVCOS ?
	jnz	.Lsysc_set_fs_fixup
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	j	.Lsysc_return
.Lsysc_set_fs_fixup:
#endif
	larl	%r14,.Lsysc_return
	jg	set_fs_fixup

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lsysc_vxrs:
	larl	%r14,.Lsysc_return
	jg	load_fpu_regs

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lsysc_sigpending:
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
	jno	.Lsysc_return
.Lsysc_do_syscall:
	lghi	%r13,__TASK_thread
	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
	lghi	%r1,0			# svc 0 returns -ENOSYS
	j	.Lsysc_do_svc
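
# Sketch of the restart flow above: if do_signal returns with _PIF_SYSCALL
# still set in pt_regs, the interrupted system call is restarted right away.
# .Lsysc_do_syscall reloads the svc arguments %r2-%r7 from pt_regs and
# re-enters .Lsysc_do_svc, with %r1 cleared so that a restarted svc 0
# (system call number in %r1) ends up returning -ENOSYS.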

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lsysc_notify_resume:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_notify_resume

#
# _TIF_UPROBE is set, call uprobe_notify_resume
#
#ifdef CONFIG_UPROBES
.Lsysc_uprobe_notify:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	uprobe_notify_resume
#endif

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lsysc_guarded_storage:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	gs_load_bc_cb
#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lsysc_patch_pending:
	lg	%r2,__LC_CURRENT	# pass pointer to task struct
	larl	%r14,.Lsysc_return
	jg	klp_update_patch_state
#endif

#
# _PIF_PER_TRAP is set, call do_per_trap
#
.Lsysc_singlestep:
	ni	__PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_per_trap

#
# _PIF_SYSCALL_RESTART is set, repeat the current system call
#
.Lsysc_syscall_restart:
	ni	__PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART
	lmg	%r1,%r7,__PT_R1(%r11)	# load svc arguments
	lg	%r2,__PT_ORIG_GPR2(%r11)
	j	.Lsysc_do_svc

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
.Lsysc_tracesys:
	lgr	%r2,%r11		# pass pointer to pt_regs
	la	%r3,0
	llgh	%r0,__PT_INT_CODE+2(%r11)
	stg	%r0,__PT_R2(%r11)
	brasl	%r14,do_syscall_trace_enter
	lghi	%r0,NR_syscalls
	clgr	%r0,%r2
	jnh	.Lsysc_tracenogo
	sllg	%r8,%r2,3
	lg	%r9,0(%r8,%r10)
.Lsysc_tracego:
	lmg	%r3,%r7,__PT_R3(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lg	%r2,__PT_ORIG_GPR2(%r11)
	BASR_EX	%r14,%r9		# call sys_xxx
	stg	%r2,__PT_R2(%r11)	# store return value
.Lsysc_tracenogo:
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	jz	.Lsysc_return
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_syscall_trace_exit
ENDPROC(system_call)

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	lg	%r12,__LC_CURRENT
	brasl	%r14,schedule_tail
	TRACE_IRQS_ON
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
	jne	.Lsysc_tracenogo
	# it's a kernel thread
	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
	la	%r2,0(%r10)
	BASR_EX	%r14,%r9
	j	.Lsysc_tracenogo
ENDPROC(ret_from_fork)

ENTRY(kernel_thread_starter)
	la	%r2,0(%r10)
	BASR_EX	%r14,%r9
	j	.Lsysc_tracenogo
ENDPROC(kernel_thread_starter)
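
# Illustration of the kernel-thread convention above: for a kernel thread
# the child's pt_regs are expected (presumably set up by copy_thread) to
# hold the thread function in gpr 9 and its argument in gpr 10, so
# ret_from_fork and kernel_thread_starter merely move the argument into
# %r2 and call %r9 - the assembler equivalent of "fn(arg)".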

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
	stpt	__LC_SYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r10,__LC_LAST_BREAK
	srag	%r11,%r10,12
	jnz	0f
	/* if __LC_LAST_BREAK is < 4096, it contains one of
	 * the lpswe addresses in lowcore. Set it to 1 (initial state)
	 * to prevent leaking that address to userspace.
	 */
	lghi	%r10,1
0:	lg	%r12,__LC_CURRENT
	lghi	%r11,0
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# test problem state bit
	jnz	3f			# -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for program checks in sie64a
	lgr	%r14,%r9
	slg	%r14,BASED(.Lsie_critical_start)
	clg	%r14,BASED(.Lsie_critical_length)
	jhe	1f
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	lghi	%r11,_PIF_GUEST_FAULT
#endif
1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	2f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
2:	CHECK_STACK __LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 5f
	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,5f
3:	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	lg	%r15,__LC_KERNEL_STACK
	lgr	%r14,%r12
	aghi	%r14,__TASK_thread	# pointer to thread_struct
	lghi	%r13,__LC_PGM_TDB
	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
	jz	4f
	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
4:	stg	%r10,__THREAD_last_break(%r14)
5:	lgr	%r13,%r11
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
	mvc	__PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
	stg	%r13,__PT_FLAGS(%r11)
	stg	%r10,__PT_ARGS(%r11)
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jz	6f
	tmhh	%r8,0x0001		# kernel per event ?
	jz	.Lpgm_kprobe
	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
6:	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	larl	%r1,pgm_check_table
	llgh	%r10,__PT_INT_CODE+2(%r11)
	nill	%r10,0x007f
	sll	%r10,3
	je	.Lpgm_return
	lg	%r9,0(%r10,%r1)		# load address of handler routine
	lgr	%r2,%r11		# pass pointer to pt_regs
	BASR_EX	%r14,%r9		# branch to interrupt-handler
.Lpgm_return:
	LOCKDEP_SYS_EXIT
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lsysc_restore
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
	jo	.Lsysc_do_syscall
	j	.Lsysc_tif

#
# PER event in supervisor state, must be kprobes
#
.Lpgm_kprobe:
	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_per_trap
	j	.Lpgm_return

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	lghi	%r13,__TASK_thread
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per and enable irqs
ENDPROC(pgm_check_handler)
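
# Sketch of the single-stepped svc case (.Lpgm_svcper) above: when a PER
# event reports a system call instruction, the usual program check path is
# skipped.  A return PSW is built from the svc new PSW mask and the address
# of .Lsysc_per, %r14 is preloaded with _PIF_SYSCALL | _PIF_PER_TRAP, and
# lpswe enters the system call handler; the _PIF_PER_TRAP flag then makes
# the work loop deliver do_per_trap once the system call has completed.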

/*
 * IO interrupt handler routine
 */
ENTRY(io_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_IO_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,IO_INTERRUPT
	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
	jz	.Lio_call
	lghi	%r3,THIN_INTERRUPT
.Lio_call:
	brasl	%r14,do_IRQ
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
	jz	.Lio_return
	tpi	0
	jz	.Lio_return
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	j	.Lio_loop
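
# Note on the loop above: when running on LPAR, "tpi 0" polls for a further
# pending I/O interrupt (storing its interruption code in lowcore); if one
# is present, the code is copied into pt_regs and do_IRQ is called again,
# saving a full exit and re-entry of the interrupt handler.  The loop is
# skipped when not on LPAR, presumably because the extra polling does not
# pay off under a hypervisor.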
.Lio_return:
	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON
.Lio_tif:
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lio_work		# there is work to do (signals etc.)
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	jnz	.Lio_work
.Lio_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lio_exit_kernel
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lio_exit_timer:
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
.Lio_exit_kernel:
	lmg	%r11,%r15,__PT_R11(%r11)
	b	__LC_RETURN_LPSWE(%r0)
.Lio_done:

#
# There is work to do, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
# 2) if we return to kernel code and kvm is enabled check if we need to
#    modify the psw to leave SIE
# 3) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
.Lio_work:
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jo	.Lio_work_user		# yes -> do resched & signal
#ifdef CONFIG_PREEMPTION
	# check for preemptive scheduling
	icm	%r0,15,__LC_PREEMPT_COUNT
	jnz	.Lio_restore		# preemption is disabled
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jno	.Lio_restore
	# switch to kernel stack
	lg	%r1,__PT_R15(%r11)
	aghi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	# TRACE_IRQS_ON already done at .Lio_return, call
	# TRACE_IRQS_OFF to keep things symmetrical
	TRACE_IRQS_OFF
	brasl	%r14,preempt_schedule_irq
	j	.Lio_return
#else
	j	.Lio_restore
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
.Lio_work_user:
	lg	%r1,__LC_KERNEL_STACK
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1

#
# One of the work bits is on. Find out which one.
#
.Lio_work_tif:
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jo	.Lio_mcck_pending
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jo	.Lio_reschedule
#ifdef CONFIG_LIVEPATCH
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lio_patch_pending
#endif
	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
	jo	.Lio_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lio_notify_resume
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
	jo	.Lio_guarded_storage
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lio_vxrs
	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
	jnz	.Lio_asce
	j	.Lio_return		# beware of critical section cleanup

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lio_mcck_pending:
	# TRACE_IRQS_ON already done at .Lio_return
	brasl	%r14,s390_handle_mcck	# TIF bit will be cleared by handler
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lio_asce:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
	lctlg	%c7,%c7,__LC_VDSO_ASCE		# load secondary asce
	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
	jz	.Lio_return
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
	tm	__LC_STFLE_FAC_LIST+3,0x10	# has MVCOS ?
	jnz	.Lio_set_fs_fixup
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	j	.Lio_return
.Lio_set_fs_fixup:
#endif
	larl	%r14,.Lio_return
	jg	set_fs_fixup

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lio_vxrs:
	larl	%r14,.Lio_return
	jg	load_fpu_regs

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lio_guarded_storage:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,gs_load_bc_cb
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lio_reschedule:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	brasl	%r14,schedule		# call scheduler
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lio_patch_pending:
	lg	%r2,__LC_CURRENT	# pass pointer to task struct
	larl	%r14,.Lio_return
	jg	klp_update_patch_state
#endif

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lio_sigpending:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lio_notify_resume:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_notify_resume
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return
ENDPROC(io_int_handler)
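
# Note on the ssm pairs used above: instead of composing a PSW mask, the
# work handlers borrow masks that already sit in lowcore - the svc new PSW
# mask has I/O and external interrupts enabled (system calls run enabled),
# while the program check new PSW mask has them disabled - so
# "ssm __LC_SVC_NEW_PSW" re-enables and "ssm __LC_PGM_NEW_PSW" disables
# interrupts around schedule, do_signal and friends.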

/*
 * External interrupt handler routine
 */
ENTRY(ext_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_EXT_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	lghi	%r1,__LC_EXT_PARAMS2
	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,EXT_INTERRUPT
	brasl	%r14,do_IRQ
	j	.Lio_return
ENDPROC(ext_int_handler)

/*
 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
 */
ENTRY(psw_idle)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,.Lpsw_idle_lpsw+4
	stg	%r1,__SF_EMPTY+8(%r15)
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
.Lpsw_idle_stcctm:
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	STCK	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
	lpswe	__SF_EMPTY(%r15)
	BR_EX	%r14
.Lpsw_idle_end:
ENDPROC(psw_idle)
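
# Note: the ".insn rsy,0xeb0000000017,..." above appears to encode STCCTM
# (store CPU counter multiple, written as .insn so that assemblers without
# the mnemonic can still build this file), sampling what is presumably the
# MT diagnostic counter set (5) into the save area; .Lcleanup_idle takes a
# second sample on wakeup and accumulates the difference into mt_cycles.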

/*
 * Store floating-point controls and floating-point or vector registers,
 * depending on whether the vector facility is available.  A critical
 * section cleanup assures that the registers are stored even if
 * interrupted for some other work.  The CIF_FPU flag is set to trigger a
 * lazy restore of the register contents at return from io or a system call.
 */
ENTRY(save_fpu_regs)
	lg	%r2,__LC_CURRENT
	aghi	%r2,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsave_fpu_regs_exit
	stfpc	__THREAD_FPU_fpc(%r2)
	lg	%r3,__THREAD_FPU_regs(%r2)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jz	.Lsave_fpu_regs_fp	# no -> store FP regs
	VSTM	%v0,%v15,0,%r3		# vstm 0,15,0(3)
	VSTM	%v16,%v31,256,%r3	# vstm 16,31,256(3)
	j	.Lsave_fpu_regs_done	# -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
	std	0,0(%r3)
	std	1,8(%r3)
	std	2,16(%r3)
	std	3,24(%r3)
	std	4,32(%r3)
	std	5,40(%r3)
	std	6,48(%r3)
	std	7,56(%r3)
	std	8,64(%r3)
	std	9,72(%r3)
	std	10,80(%r3)
	std	11,88(%r3)
	std	12,96(%r3)
	std	13,104(%r3)
	std	14,112(%r3)
	std	15,120(%r3)
.Lsave_fpu_regs_done:
	oi	__LC_CPU_FLAGS+7,_CIF_FPU
.Lsave_fpu_regs_exit:
	BR_EX	%r14
.Lsave_fpu_regs_end:
ENDPROC(save_fpu_regs)
EXPORT_SYMBOL(save_fpu_regs)
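
# Example of the lazy FP/VX protocol implemented here: kernel code that
# wants to use floating-point or vector registers (e.g. via
# kernel_fpu_begin()) first calls save_fpu_regs, which preserves the user
# register contents in thread->fpu and sets CIF_FPU.  The user registers
# are not reloaded immediately; load_fpu_regs below restores them from the
# system call or interrupt return path once the _CIF_FPU work bit is seen.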

/*
 * Load floating-point controls and floating-point or vector registers.
 * A critical section cleanup assures that the register contents are
 * loaded even if interrupted for some other work.
 *
 * There are special calling conventions to fit into sysc and io return work:
 *	%r15:	<kernel stack>
 * The function requires:
 *	%r4	(scratch register, clobbered)
 */
load_fpu_regs:
	lg	%r4,__LC_CURRENT
	aghi	%r4,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jno	.Lload_fpu_regs_exit
	lfpc	__THREAD_FPU_fpc(%r4)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
	jz	.Lload_fpu_regs_fp		# -> no VX, load FP regs
	VLM	%v0,%v15,0,%r4
	VLM	%v16,%v31,256,%r4
	j	.Lload_fpu_regs_done
.Lload_fpu_regs_fp:
	ld	0,0(%r4)
	ld	1,8(%r4)
	ld	2,16(%r4)
	ld	3,24(%r4)
	ld	4,32(%r4)
	ld	5,40(%r4)
	ld	6,48(%r4)
	ld	7,56(%r4)
	ld	8,64(%r4)
	ld	9,72(%r4)
	ld	10,80(%r4)
	ld	11,88(%r4)
	ld	12,96(%r4)
	ld	13,104(%r4)
	ld	14,112(%r4)
	ld	15,120(%r4)
.Lload_fpu_regs_done:
	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
.Lload_fpu_regs_exit:
	BR_EX	%r14
.Lload_fpu_regs_end:
ENDPROC(load_fpu_regs)

.L__critical_end:

/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
	STCK	__LC_MCCK_CLOCK
	BPOFF
	la	%r1,4095		# validate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
	sckc	__LC_CLOCK_COMPARATOR			# validate comparator
	lam	%a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1)	# validate acrs
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
	lg	%r12,__LC_CURRENT
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	la	%r14,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
	ptlb
	lg	%r11,__LC_MCESAD-4095(%r14) # extended machine check save area
	nill	%r11,0xfc00	# MCESA_ORIGIN_MASK
	TSTMSK	__LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
	jno	0f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_GS_VALID
	jno	0f
	.insn	rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
0:	l	%r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_FC_VALID
	jo	0f
	sr	%r14,%r14
0:	sfpc	%r14
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jo	0f
	lghi	%r14,__LC_FPREGS_SAVE_AREA
	ld	%f0,0(%r14)
	ld	%f1,8(%r14)
	ld	%f2,16(%r14)
	ld	%f3,24(%r14)
	ld	%f4,32(%r14)
	ld	%f5,40(%r14)
	ld	%f6,48(%r14)
	ld	%f7,56(%r14)
	ld	%f8,64(%r14)
	ld	%f9,72(%r14)
	ld	%f10,80(%r14)
	ld	%f11,88(%r14)
	ld	%f12,96(%r14)
	ld	%f13,104(%r14)
	ld	%f14,112(%r14)
	ld	%f15,120(%r14)
	j	1f
0:	VLM	%v0,%v15,0,%r11
	VLM	%v16,%v31,256,%r11
1:	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	jl	0f
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	4f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
4:	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
.Lmcck_skip:
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lmcck_return
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jno	.Lmcck_return
	TRACE_IRQS_OFF
	brasl	%r14,s390_handle_mcck
	TRACE_IRQS_ON
.Lmcck_return:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0:	lmg	%r11,%r15,__PT_R11(%r11)
	b	__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	lg	%r15,__LC_NODAT_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	j	.Lmcck_skip
ENDPROC(mcck_int_handler)

#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
	stg	%r15,__LC_SAVE_AREA_RESTART
	lg	%r15,__LC_RESTART_STACK
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lg	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
ENDPROC(restart_int_handler)

	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
ENTRY(stack_overflow)
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
ENDPROC(stack_overflow)
#endif

ENTRY(cleanup_critical)
	cghi	%r9,__LC_RETURN_LPSWE
	je	.Lcleanup_lpswe
#if IS_ENABLED(CONFIG_KVM)
	clg	%r9,BASED(.Lcleanup_table_sie)	# .Lsie_gmap
	jl	0f
	clg	%r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done
	jl	.Lcleanup_sie
#endif
	clg	%r9,BASED(.Lcleanup_table)	# system_call
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_do_svc
	jl	.Lcleanup_system_call
	clg	%r9,BASED(.Lcleanup_table+16)	# .Lsysc_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+24)	# .Lsysc_restore
	jl	.Lcleanup_sysc_tif
	clg	%r9,BASED(.Lcleanup_table+32)	# .Lsysc_done
	jl	.Lcleanup_sysc_restore
	clg	%r9,BASED(.Lcleanup_table+40)	# .Lio_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+48)	# .Lio_restore
	jl	.Lcleanup_io_tif
	clg	%r9,BASED(.Lcleanup_table+56)	# .Lio_done
	jl	.Lcleanup_io_restore
	clg	%r9,BASED(.Lcleanup_table+64)	# psw_idle
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+72)	# .Lpsw_idle_end
	jl	.Lcleanup_idle
	clg	%r9,BASED(.Lcleanup_table+80)	# save_fpu_regs
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+88)	# .Lsave_fpu_regs_end
	jl	.Lcleanup_save_fpu_regs
	clg	%r9,BASED(.Lcleanup_table+96)	# load_fpu_regs
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+104)	# .Lload_fpu_regs_end
	jl	.Lcleanup_load_fpu_regs
0:	BR_EX	%r14,%r11
ENDPROC(cleanup_critical)
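
# A worked example of the cleanup mechanism: suppose an interrupt arrives
# after system_call has saved the gprs (.Lsysc_stmg executed) but before
# .Lsysc_do_svc.  SWITCH_ASYNC sees the old PSW inside the critical range
# .L__critical_start/.L__critical_end and calls cleanup_critical, which
# matches the system_call...Lsysc_do_svc window and runs
# .Lcleanup_system_call below.  That code replays the entry steps that have
# not happened yet (timer update, pt_regs setup) against the saved state
# and then points %r9 at .Lsysc_do_svc, so the interrupted context resumes
# as if the system call entry had completed atomically.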

	.align	8
.Lcleanup_table:
	.quad	system_call
	.quad	.Lsysc_do_svc
	.quad	.Lsysc_tif
	.quad	.Lsysc_restore
	.quad	.Lsysc_done
	.quad	.Lio_tif
	.quad	.Lio_restore
	.quad	.Lio_done
	.quad	psw_idle
	.quad	.Lpsw_idle_end
	.quad	save_fpu_regs
	.quad	.Lsave_fpu_regs_end
	.quad	load_fpu_regs
	.quad	.Lload_fpu_regs_end

#if IS_ENABLED(CONFIG_KVM)
.Lcleanup_table_sie:
	.quad	.Lsie_gmap
	.quad	.Lsie_done

.Lcleanup_sie:
	cghi	%r11,__LC_SAVE_AREA_ASYNC	# Is this in normal interrupt ?
	je	1f
	slg	%r9,BASED(.Lsie_crit_mcck_start)
	clg	%r9,BASED(.Lsie_crit_mcck_length)
	jh	1f
	oi	__LC_CPU_FLAGS+7,_CIF_MCCK_GUEST
1:	BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	BR_EX	%r14,%r11
#endif

.Lcleanup_system_call:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn)
	jh	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	# check if stmg has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn+8)
	jh	0f
	mvc	__LC_SAVE_AREA_SYNC(64),0(%r11)
0:	# check if base register setup + TIF bit load has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+16)
	jhe	0f
	# set up saved register r12 task struct pointer
	stg	%r12,32(%r11)
	# set up saved register r13 __TASK_thread offset
	mvc	40(8,%r11),BASED(.Lcleanup_system_call_const)
0:	# check if the user time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+24)
	jh	0f
	lg	%r15,__LC_EXIT_TIMER
	slg	%r15,__LC_SYNC_ENTER_TIMER
	alg	%r15,__LC_USER_TIMER
	stg	%r15,__LC_USER_TIMER
0:	# check if the system time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+32)
	jh	0f
	lg	%r15,__LC_LAST_UPDATE_TIMER
	slg	%r15,__LC_EXIT_TIMER
	alg	%r15,__LC_SYSTEM_TIMER
	stg	%r15,__LC_SYSTEM_TIMER
0:	# update accounting time stamp
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	# set up saved register r11
	lg	%r15,__LC_KERNEL_STACK
	la	%r9,STACK_FRAME_OVERHEAD(%r15)
	stg	%r9,24(%r11)		# r11 pt_regs pointer
	# fill pt_regs
	mvc	__PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
	stmg	%r0,%r7,__PT_R0(%r9)
	mvc	__PT_PSW(16,%r9),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r9),__LC_SVC_ILC
	xc	__PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
	mvi	__PT_FLAGS+7(%r9),_PIF_SYSCALL
	# setup saved register r15
	stg	%r15,56(%r11)		# r15 stack pointer
	# set new psw address and exit
	larl	%r9,.Lsysc_do_svc
	BR_EX	%r14,%r11
.Lcleanup_system_call_insn:
	.quad	system_call
	.quad	.Lsysc_stmg
	.quad	.Lsysc_per
	.quad	.Lsysc_vtime+36
	.quad	.Lsysc_vtime+42
.Lcleanup_system_call_const:
	.quad	__TASK_thread

.Lcleanup_sysc_tif:
	larl	%r9,.Lsysc_tif
	BR_EX	%r14,%r11

.Lcleanup_sysc_restore:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_sysc_restore_insn)
	jh	0f
	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	clg	%r9,BASED(.Lcleanup_sysc_restore_insn+8)
	je	1f
	lg	%r9,24(%r11)		# get saved pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
.Lcleanup_lpswe:
1:	lmg	%r8,%r9,__LC_RETURN_PSW
	BR_EX	%r14,%r11
.Lcleanup_sysc_restore_insn:
	.quad	.Lsysc_exit_timer
	.quad	.Lsysc_done - 4

.Lcleanup_io_tif:
	larl	%r9,.Lio_tif
	BR_EX	%r14,%r11

.Lcleanup_io_restore:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_io_restore_insn)
	jh	0f
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	clg	%r9,BASED(.Lcleanup_io_restore_insn+8)
	je	1f
	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
1:	lmg	%r8,%r9,__LC_RETURN_PSW
	BR_EX	%r14,%r11
.Lcleanup_io_restore_insn:
	.quad	.Lio_exit_timer
	.quad	.Lio_done - 4

.Lcleanup_idle:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
	# copy interrupt clock & cpu timer
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0:	# check if stck & stpt have been executed
	clg	%r9,BASED(.Lcleanup_idle_insn)
	jhe	1f
	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
1:	# calculate idle cycles
	clg	%r9,BASED(.Lcleanup_idle_insn)
	jl	3f
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	3f
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
	larl	%r3,mt_cycles
	ag	%r3,__LC_PERCPU_OFFSET
	la	%r4,__SF_EMPTY+16(%r15)
2:	lg	%r0,0(%r3)
	slg	%r0,0(%r4)
	alg	%r0,64(%r4)
	stg	%r0,0(%r3)
	la	%r3,8(%r3)
	la	%r4,8(%r4)
	brct	%r1,2b
3:	# account system time going idle
	lg	%r9,__LC_STEAL_TIMER
	alg	%r9,__CLOCK_IDLE_ENTER(%r2)
	slg	%r9,__LC_LAST_UPDATE_CLOCK
	stg	%r9,__LC_STEAL_TIMER
	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
	lg	%r9,__LC_SYSTEM_TIMER
	alg	%r9,__LC_LAST_UPDATE_TIMER
	slg	%r9,__TIMER_IDLE_ENTER(%r2)
	stg	%r9,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
	# prepare return psw
	nihh	%r8,0xfcfd		# clear irq & wait state bits
	lg	%r9,48(%r11)		# return from psw_idle
	BR_EX	%r14,%r11
.Lcleanup_idle_insn:
	.quad	.Lpsw_idle_lpsw

.Lcleanup_save_fpu_regs:
	larl	%r9,save_fpu_regs
	BR_EX	%r14,%r11

.Lcleanup_load_fpu_regs:
	larl	%r9,load_fpu_regs
	BR_EX	%r14,%r11

/*
 * Integer constants
 */
	.align	8
.Lcritical_start:
	.quad	.L__critical_start
.Lcritical_length:
	.quad	.L__critical_end - .L__critical_start
#if IS_ENABLED(CONFIG_KVM)
.Lsie_critical_start:
	.quad	.Lsie_gmap
.Lsie_critical_length:
	.quad	.Lsie_done - .Lsie_gmap
.Lsie_crit_mcck_start:
	.quad	.Lsie_entry
.Lsie_crit_mcck_length:
	.quad	.Lsie_skip - .Lsie_entry
#endif
	.section .rodata, "a"
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
	.globl	sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL
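
# Illustration of the table generation above: every line of the included
# syscall_table.h is a SYSCALL(esame,emu) invocation, so a hypothetical
# entry SYSCALL(sys_read,compat_sys_read) expands to
# ".quad __s390x_sys_read" in sys_call_table here, and would expand to
# ".quad __s390_compat_sys_read" in the compat table below.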

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.quad __s390_ ## emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif