/* SPDX-License-Identifier: GPL-2.0 */
/*
 * S390 low-level entry points.
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Hartmut Penner (hp@de.ibm.com),
 *	      Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-extable.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>

STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT  = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

_LPP_OFFSET = __LC_LPP

	.macro	STBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", 193
	.endm

	.macro	LBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", 193
	.endm

	.macro	LPSWEY address,lpswe
	ALTERNATIVE "b \lpswe; nopr", ".insn siy,0xeb0000000071,\address,0", 193
	.endm

	.macro	MBEAR reg
	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
	.endm

	.macro	CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

	.macro	CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - STACK_SIZE
	oill	%r14,STACK_INIT
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_MCCK_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm

	/*
	 * The TSTMSK macro generates a test-under-mask instruction by
	 * calculating the memory offset for the specified mask value.
	 * Mask value can be any constant. The macro shifts the mask
	 * value to calculate the memory offset for the test-under-mask
	 * instruction.
	 */
	.macro	TSTMSK addr, mask, size=8, bytepos=0
		.if (\bytepos < \size) && (\mask >> 8)
			.if (\mask & 0xff)
				.error "Mask exceeds byte boundary"
			.endif
			TSTMSK	\addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
			.exitm
		.endif
		.ifeq \mask
			.error "Mask must not be zero"
		.endif
		off = \size - \bytepos - 1
		tm	off+\addr, \mask
	.endm

	.macro	BPOFF
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", 82
	.endm

	.macro	BPON
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", 82
	.endm

	.macro	BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
		    "j .+12; nop; nop", 82
	.endm

	.macro	BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8; .insn rrf,0xb2e80000,0,0,12,0", \
		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82
	.endm

#if IS_ENABLED(CONFIG_KVM)
	/*
	 * The OUTSIDE macro jumps to the provided label in case the value
	 * in the provided register is outside of the provided range. The
	 * macro is useful for checking whether a PSW stored in a register
	 * pair points inside or outside of a block of instructions.
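	 *
	 * Illustrative use only (the labels here are made up, they do not
	 * occur in this file):
	 *
	 *	OUTSIDE	%r9,.Lblock_start,.Lblock_end,.Lnot_in_block
	 *
	 * branches to .Lnot_in_block unless
	 * .Lblock_start <= %r9 < .Lblock_end. Note that %r13 and %r14 are
	 * clobbered by the range check.
	 *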
	 * @reg: register to check
	 * @start: start of the range
	 * @end: end of the range
	 * @outside_label: jump here if @reg is outside of [@start..@end)
	 */
	.macro	OUTSIDE reg,start,end,outside_label
	lgr	%r14,\reg
	larl	%r13,\start
	slgr	%r14,%r13
#ifdef CONFIG_AS_IS_LLVM
	clgfrl	%r14,.Lrange_size\@
#else
	clgfi	%r14,\end - \start
#endif
	jhe	\outside_label
#ifdef CONFIG_AS_IS_LLVM
	.section .rodata, "a"
	.align	4
.Lrange_size\@:
	.long	\end - \start
	.previous
#endif
	.endm

	.macro	SIEEXIT
	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	.endm
#endif

	GEN_BR_THUNK %r14

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * This nop exists only to avoid having __bpon start at the
	 * beginning of the kprobes text section. In that case there would
	 * be several symbols at the same address, and e.g. objdump would
	 * pick an arbitrary one when disassembling this code. With the
	 * added nop in between, the __bpon symbol is unique again.
	 */
	nop	0

ENTRY(__bpon)
	.globl __bpon
	BPON
	BR_EX	%r14
ENDPROC(__bpon)

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
	BR_EX	%r14
ENDPROC(__switch_to)

#if IS_ENABLED(CONFIG_KVM)
/*
 * __sie64a calling convention:
 * %r2 pointer to sie control block phys
 * %r3 pointer to sie control block virt
 * %r4 guest register save area
 */
ENTRY(__sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL_PHYS(%r15)	# save sie block physical..
	stg	%r3,__SF_SIE_CONTROL(%r15)	# ...and virtual addresses
	stg	%r4,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	lmg	%r0,%r13,0(%r4)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
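	# (the two low-order bits of prog20 are set by KVM, PROG_BLOCK_SIE /
	# PROG_REQUEST in kvm-s390.h, when SIE entry must be avoided; if
	# either is set, take the cleanup path instead of entering SIE)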
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15)	# get sie block phys addr
	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
	sie	0(%r14)
# Let the next instruction be NOP to avoid triggering a machine check
# and handling it in a guest as a result of the instruction execution.
	nopr	7
.Lsie_leave:
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
.Lsie_done:
# Some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between __sie64a and .Lsie_done should not cause program
# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(__sie64a)
EXPORT_SYMBOL(__sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
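 *
 * Sketch of the flow below: the user gprs %r8-%r15 are saved to the
 * lowcore sync save area, the handler switches to the kernel ASCE and
 * kernel stack, builds a struct pt_regs there and calls __do_syscall
 * with %r2 = pointer to pt_regs and %r3 = "single stepped svc" flag
 * (%r14 is 0 for a normal svc and 1 when entered via .Lpgm_svcper).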
 */

ENTRY(system_call)
	stpt	__LC_SYS_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lghi	%r14,0
.Lsysc_per:
	STBEAR	__LC_LAST_BREAK
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r12,__LC_CURRENT
	lg	%r15,__LC_KERNEL_STACK
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r8,%r8
	xgr	%r9,%r9
	xgr	%r10,%r10
	xgr	%r11,%r11
	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
	MBEAR	%r2
	lgr	%r3,%r14
	brasl	%r14,__do_syscall
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(system_call)

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	lgr	%r3,%r11
	brasl	%r14,__ret_from_fork
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(ret_from_fork)

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
	stpt	__LC_SYS_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r12,__LC_CURRENT
	lghi	%r10,0
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# coming from user space?
	jno	.Lpgm_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	j	3f			# -> fault in user space
.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for program checks in __sie64a
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,1f
	SIEEXIT
	lghi	%r10,_PIF_GUEST_FAULT
#endif
1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	2f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
2:	CHECK_STACK __LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 4f
	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
3:	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	lg	%r15,__LC_KERNEL_STACK
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stg	%r10,__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
	stmg	%r8,%r9,__PT_PSW(%r11)

	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	lgr	%r2,%r11
	brasl	%r14,__do_pgm_check
	tmhh	%r8,0x0001		# returning to user space?
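	# if so, the user ASCE and the branch-prediction state are restored
	# and the kernel exit time is accounted before the return PSW is built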
	jno	.Lpgm_exit_kernel
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
.Lpgm_exit_kernel:
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,1
	LBEAR	__LC_PGM_LAST_BREAK
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
ENDPROC(pgm_check_handler)

/*
 * Interrupt handler macro used for external and IO interrupts.
 */
.macro INT_HANDLER name,lc_old_psw,handler
ENTRY(\name)
	stckf	__LC_INT_CLOCK
	stpt	__LC_SYS_ENTER_TIMER
	STBEAR	__LC_LAST_BREAK
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,\lc_old_psw
	tmhh	%r8,0x0001			# interrupting from user ?
	jnz	1f
#if IS_ENABLED(CONFIG_KVM)
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,0f
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	SIEEXIT
#endif
0:	CHECK_STACK __LC_SAVE_AREA_ASYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r15,__LC_KERNEL_STACK
2:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	MBEAR	%r11
	stmg	%r8,%r9,__PT_PSW(%r11)
	lgr	%r2,%r11			# pass pointer to pt_regs
	brasl	%r14,\handler
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tmhh	%r8,0x0001			# returning to user ?
	jno	2f
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
2:	LBEAR	__PT_LAST_BREAK(%r11)
	lmg	%r0,%r15,__PT_R0(%r11)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(\name)
.endm

INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq

/*
 * Load idle PSW.
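 *
 * A sketch of the calling convention as the code below uses it: %r2
 * points to the per-cpu idle data (the entry TOD clock and CPU timer
 * values, and with SMT the MT cycle counters, are stored there), %r3
 * holds the mask for the enabled-wait PSW; the PSW built in __SF_EMPTY
 * resumes execution at psw_idle_exit.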
 */
ENTRY(psw_idle)
	stg	%r14,(__SF_GPRS+8*8)(%r15)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,psw_idle_exit
	stg	%r1,__SF_EMPTY+8(%r15)
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm
	.insn	rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
.Lpsw_idle_stcctm:
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	stckf	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
	lpswe	__SF_EMPTY(%r15)
.globl psw_idle_exit
psw_idle_exit:
	BR_EX	%r14
ENDPROC(psw_idle)

/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
	stckf	__LC_MCCK_CLOCK
	BPOFF
	la	%r1,4095		# validate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
	LBEAR	__LC_LAST_BREAK_SAVE_AREA-4095(%r1)	# validate bear
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) # validate gprs
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	la	%r14,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
	ptlb
	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	la	%r14,__LC_SYS_ENTER_TIMER
	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	.Lmcck_user
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,.Lmcck_stack
	OUTSIDE	%r9,.Lsie_entry,.Lsie_leave,4f
	oi	__LC_CPU_FLAGS+7,_CIF_MCCK_GUEST
4:	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	SIEEXIT
	j	.Lmcck_stack
#endif
.Lmcck_user:
	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lmcck_stack:
	lg	%r15,__LC_MCCK_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stctg	%c1,%c1,__PT_CR1(%r11)
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	cghi	%r2,0
	je	.Lmcck_return
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r2,%r11
	lgr	%r15,%r1
	brasl	%r14,s390_handle_mcck
.Lmcck_return:
	lctlg	%c1,%c1,__PT_CR1(%r11)
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01	# returning to user ?
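	# if so, leave the isolated branch-prediction mode and account the
	# kernel exit time before the return PSW is loaded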
	jno	0f
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
0:	ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
	LBEAR	0(%r12)
	lmg	%r11,%r15,__PT_R11(%r11)
	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	/*
	 * Iterate over all possible CPU addresses in the range 0..0xffff
	 * and stop each CPU using signal processor. Use compare and swap
	 * to allow just one CPU-stopper and prevent concurrent CPUs from
	 * stopping each other while leaving the others running.
	 */
	lhi	%r5,0
	lhi	%r6,1
	larl	%r7,.Lstop_lock
	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
	jnz	4f
	larl	%r7,.Lthis_cpu
	stap	0(%r7)			# this CPU address
	lh	%r4,0(%r7)
	nilh	%r4,0
	lhi	%r0,1
	sll	%r0,16			# CPU counter
	lhi	%r3,0			# next CPU address
0:	cr	%r3,%r4
	je	2f
1:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
	brc	SIGP_CC_BUSY,1b
2:	ahi	%r3,1
	brct	%r0,0b
3:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
	brc	SIGP_CC_BUSY,3b
4:	j	4b
ENDPROC(mcck_int_handler)

ENTRY(restart_int_handler)
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
	stg	%r15,__LC_SAVE_AREA_RESTART
	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
	jz	0f
	la	%r15,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)
0:	larl	%r15,.Lstosm_tmp
	stosm	0(%r15),0x04			# turn dat on, keep irqs off
	lg	%r15,__LC_RESTART_STACK
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lgf	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
ENDPROC(restart_int_handler)

	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
ENTRY(stack_overflow)
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
ENDPROC(stack_overflow)
#endif

	.section .data, "aw"
	.align	4
.Lstop_lock:	.long	0
.Lthis_cpu:	.short	0
.Lstosm_tmp:	.byte	0
	.section .rodata, "a"
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
	.globl	sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.quad __s390_ ## emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif