/* SPDX-License-Identifier: GPL-2.0 */
/*
 * S390 low-level entry points.
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Hartmut Penner (hp@de.ibm.com),
 *	      Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/ctl_reg.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>

/* Offsets of the individual saved gprs within pt_regs (8 bytes each) */
__PT_R0 = __PT_GPRS
__PT_R1 = __PT_GPRS + 8
__PT_R2 = __PT_GPRS + 16
__PT_R3 = __PT_GPRS + 24
__PT_R4 = __PT_GPRS + 32
__PT_R5 = __PT_GPRS + 40
__PT_R6 = __PT_GPRS + 48
__PT_R7 = __PT_GPRS + 56
__PT_R8 = __PT_GPRS + 64
__PT_R9 = __PT_GPRS + 72
__PT_R10 = __PT_GPRS + 80
__PT_R11 = __PT_GPRS + 88
__PT_R12 = __PT_GPRS + 96
__PT_R13 = __PT_GPRS + 104
__PT_R14 = __PT_GPRS + 112
__PT_R15 = __PT_GPRS + 120

STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE = 1 << STACK_SHIFT
/* %r15 value for an empty kernel stack: top of stack minus one frame + pt_regs */
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

_LPP_OFFSET = __LC_LPP

	/*
	 * Branch to stack_overflow if %r15 points into the stack guard
	 * area at the low end of the stack.  \savearea (a lowcore offset
	 * holding the saved gprs) is passed to stack_overflow in %r14.
	 */
	.macro	CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

	/*
	 * Round %r15 down to its stack base, apply the STACK_INIT offset
	 * and compare the result against every known per-cpu stack in
	 * lowcore.  On a match branch to \oklabel, otherwise pass
	 * \savearea in %r14 and branch to stack_overflow.
	 */
	.macro	CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - STACK_SIZE
	oill	%r14,STACK_INIT
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_MCCK_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm

	/*
	 * Store clock into \savearea: stck (0xb205), patched to
	 * store-clock-fast stckf (0xb27c) when facility 25 is installed.
	 */
	.macro	STCK savearea
	ALTERNATIVE ".insn s,0xb2050000,\savearea", \
		    ".insn s,0xb27c0000,\savearea", 25
	.endm

	/*
	 * The TSTMSK macro generates a test-under-mask instruction by
	 * calculating the memory offset for the specified mask value.
	 * Mask value can be any constant. The macro shifts the mask
	 * value to calculate the memory offset for the test-under-mask
	 * instruction.
	 */
	.macro	TSTMSK addr, mask, size=8, bytepos=0
	.if (\bytepos < \size) && (\mask >> 8)
	.if (\mask & 0xff)
	.error	"Mask exceeds byte boundary"
	.endif
	TSTMSK	\addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
	.exitm
	.endif
	.ifeq \mask
	.error	"Mask must not be zero"
	.endif
	off = \size - \bytepos - 1
	tm	off+\addr, \mask
	.endm

	/*
	 * Branch-prediction control (NOBP spectre mitigation).  The
	 * .long constants encode the branch-prediction-control insn;
	 * they are patched in only when alternative feature 82 applies.
	 */
	.macro	BPOFF
	ALTERNATIVE "", ".long 0xb2e8c000", 82
	.endm

	.macro	BPON
	ALTERNATIVE "", ".long 0xb2e8d000", 82
	.endm

	/* On kernel entry: re-enable branch prediction iff a \tif_mask bit is set */
	.macro	BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
		    "", 82
	.endm

	/* On kernel exit: disable prediction if a \tif_mask bit is set, else enable it */
	.macro	BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8; .long 0xb2e8c000", \
		    "jnz .+8; .long 0xb2e8d000", 82
	.endm

	GEN_BR_THUNK %r14
	GEN_BR_THUNK %r14,%r13

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * This nop exists only in order to avoid that __bpon starts at
	 * the beginning of the kprobes text section. In that case we would
	 * have several symbols at the same address. E.g. objdump would take
	 * an arbitrary symbol name when disassembling this code.
	 * With the added nop in between the __bpon symbol is unique
	 * again.
	 */
	nop	0

/* Turn branch prediction back on (see BPON) - callable from C */
ENTRY(__bpon)
	.globl __bpon
	BPON
	BR_EX	%r14
ENDPROC(__bpon)

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40 # lpp if facility 40
	BR_EX	%r14
ENDPROC(__switch_to)

#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL(%r15)	# save control block pointer
	stg	%r3,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap			# no gmap -> keep current asce
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
	sie	0(%r14)
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie_mcck/.Lcleanup_sie_int
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(sie64a)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
 */

ENTRY(system_call)
	stpt	__LC_SYS_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lghi	%r14,0			# %r14 = 0: normal svc entry
.Lsysc_per:				# entered with %r14 = 1 from .Lpgm_svcper
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r12,__LC_CURRENT
	lg	%r15,__LC_KERNEL_STACK
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	# clear user controlled register to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r8,%r8
	xgr	%r9,%r9
	xgr	%r10,%r10
	xgr	%r11,%r11
	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	lgr	%r3,%r14			# second arg: 1 if PER single-stepped svc
	brasl	%r14,__do_syscall
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	b	__LC_RETURN_LPSWE
ENDPROC(system_call)

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	lgr	%r3,%r11		# pass pt_regs pointer as second argument
	brasl	%r14,__ret_from_fork
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	b	__LC_RETURN_LPSWE
ENDPROC(ret_from_fork)

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
	stpt	__LC_SYS_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r12,__LC_CURRENT
	lghi	%r10,0			# %r10 = pt_regs flags, default 0
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# coming from user space?
	jno	.Lpgm_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	j	3f			# -> fault in user space
.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for program checks in sie64a
	lgr	%r14,%r9
	larl	%r13,.Lsie_gmap
	slgr	%r14,%r13
	lghi	%r13,.Lsie_done - .Lsie_gmap
	clgr	%r14,%r13
	jhe	1f			# old PSW outside sie critical section
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	lghi	%r10,_PIF_GUEST_FAULT
#endif
1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	2f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
2:	CHECK_STACK __LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 4f
	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
3:	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	lg	%r15,__LC_KERNEL_STACK
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# %r11 -> pt_regs
	stg	%r10,__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	stmg	%r8,%r9,__PT_PSW(%r11)

	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,__do_pgm_check
	tmhh	%r8,0x0001		# returning to user space?
	jno	.Lpgm_exit_kernel
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
.Lpgm_exit_kernel:
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	b	__LC_RETURN_LPSWE

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,1			# tell .Lsysc_per this was a PER svc
	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per
ENDPROC(pgm_check_handler)

/*
 * Interrupt handler macro used for external and IO interrupts.
 */
.macro INT_HANDLER name,lc_old_psw,handler
ENTRY(\name)
	STCK	__LC_INT_CLOCK
	stpt	__LC_SYS_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,\lc_old_psw
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	1f
#if IS_ENABLED(CONFIG_KVM)
	lgr	%r14,%r9
	larl	%r13,.Lsie_gmap
	slgr	%r14,%r13
	lghi	%r13,.Lsie_done - .Lsie_gmap
	clgr	%r14,%r13
	jhe	0f			# old PSW outside sie critical section
	brasl	%r14,.Lcleanup_sie_int
#endif
0:	CHECK_STACK __LC_SAVE_AREA_ASYNC
	lgr	%r11,%r15
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	stg	%r11,__SF_BACKCHAIN(%r15)
	j	2f
1:	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r15,__LC_KERNEL_STACK
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
2:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	# FIX: was "tm %r8,0x0001" - TM takes a storage operand, not a
	# register.  Every other "from user space?" check in this file
	# tests the problem-state bit of the old PSW in %r8 with tmhh
	# (see the identical check four instructions below).
	tmhh	%r8,0x0001		# coming from user space?
	jno	1f
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
1:	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,\handler
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tmhh	%r8,0x0001		# returning to user ?
	jno	2f
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
2:	lmg	%r0,%r15,__PT_R0(%r11)
	b	__LC_RETURN_LPSWE
ENDPROC(\name)
.endm

INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq

/*
 * Load idle PSW.
 */
ENTRY(psw_idle)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,psw_idle_exit
	stg	%r1,__SF_EMPTY+8(%r15)
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm	# no multithreading -> skip stcctm
	.insn	rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)	# stcctm
.Lpsw_idle_stcctm:
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	STCK	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
	lpswe	__SF_EMPTY(%r15)
.globl psw_idle_exit
psw_idle_exit:
	BR_EX	%r14
ENDPROC(psw_idle)

/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
	STCK	__LC_MCCK_CLOCK
	BPOFF
	la	%r1,4095		# validate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
	sckc	__LC_CLOCK_COMPARATOR			# validate comparator
	lam	%a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1)	# validate acrs
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	la	%r14,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
	ptlb
	lg	%r11,__LC_MCESAD-4095(%r14)	# extended machine check save area
	nill	%r11,0xfc00		# MCESA_ORIGIN_MASK
	TSTMSK	__LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
	jno	0f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_GS_VALID
	jno	0f
	.insn	rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
0:	l	%r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_FC_VALID
	jo	0f
	sr	%r14,%r14		# fpc invalid -> restore zero
0:	sfpc	%r14
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jo	0f
	# no vector facility: restore fp regs from the lowcore save area
	lghi	%r14,__LC_FPREGS_SAVE_AREA
	ld	%f0,0(%r14)
	ld	%f1,8(%r14)
	ld	%f2,16(%r14)
	ld	%f3,24(%r14)
	ld	%f4,32(%r14)
	ld	%f5,40(%r14)
	ld	%f6,48(%r14)
	ld	%f7,56(%r14)
	ld	%f8,64(%r14)
	ld	%f9,72(%r14)
	ld	%f10,80(%r14)
	ld	%f11,88(%r14)
	ld	%f12,96(%r14)
	ld	%f13,104(%r14)
	ld	%f14,112(%r14)
	ld	%f15,120(%r14)
	j	1f
	# vector facility: restore all vector regs from the mcck ext. save area
0:	VLM	%v0,%v15,0,%r11
	VLM	%v16,%v31,256,%r11
1:	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	# cpu timer invalid: pick the most recent plausible timer value
	la	%r14,__LC_SYS_ENTER_TIMER
	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	4f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
4:	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	.Lmcck_user
#if IS_ENABLED(CONFIG_KVM)
	lgr	%r14,%r9
	larl	%r13,.Lsie_gmap
	slgr	%r14,%r13
	lghi	%r13,.Lsie_done - .Lsie_gmap
	clgr	%r14,%r13
	jhe	.Lmcck_stack		# old PSW outside sie critical section
	brasl	%r14,.Lcleanup_sie_mcck
#endif
	j	.Lmcck_stack
.Lmcck_user:
	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lmcck_stack:
	lg	%r15,__LC_MCCK_STACK
.Lmcck_skip:
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stctg	%c1,%c1,__PT_CR1(%r11)
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	cghi	%r2,0
	je	.Lmcck_return		# zero return -> nothing more to do
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	brasl	%r14,s390_handle_mcck
.Lmcck_return:
	lctlg	%c1,%c1,__PT_CR1(%r11)
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
0:	lmg	%r11,%r15,__PT_R11(%r11)
	b	__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	lg	%r15,__LC_NODAT_STACK
	j	.Lmcck_skip
ENDPROC(mcck_int_handler)

#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40 # lpp if facility 40
	stg	%r15,__LC_SAVE_AREA_RESTART
	lg	%r15,__LC_RESTART_STACK
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lg	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
ENDPROC(restart_int_handler)

	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
ENTRY(stack_overflow)
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)	# %r14 = lowcore savearea (set by CHECK_*STACK)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
ENDPROC(stack_overflow)
#endif

#if IS_ENABLED(CONFIG_KVM)
/*
 * Exit the sie critical section after an interrupt/mcck hit inside it.
 * %r9 = interrupted address; falls through to .Lcleanup_sie_int.
 */
.Lcleanup_sie_mcck:
	larl	%r13,.Lsie_entry
	slgr	%r9,%r13
	larl	%r13,.Lsie_skip
	clgr	%r9,%r13
	jh	.Lcleanup_sie_int	# not between .Lsie_entry and .Lsie_skip
	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
.Lcleanup_sie_int:
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	larl	%r9,sie_exit			# skip forward to sie_exit
	BR_EX	%r14,%r13

#endif
	.section .rodata, "a"
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
	.globl	sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.quad __s390_ ## emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif