/* SPDX-License-Identifier: GPL-2.0 */
/*
 * S390 low-level entry points.
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Hartmut Penner (hp@de.ibm.com),
 *	      Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-extable.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/fpu-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/nospec-insn.h>

_LPP_OFFSET	= __LC_LPP

/*
 * STBEAR/LBEAR store and load the breaking-event-address register, and
 * LPSWEY is the long-displacement variant of LPSWE; all of them require
 * the BEAR enhancement facility (193). MBEAR copies the saved last-break
 * value from lowcore into pt_regs. Without the facility these macros
 * turn into nops (LPSWEY falls back to plain LPSWE).
 */
	.macro	STBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", 193
	.endm

	.macro	LBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", 193
	.endm

	.macro	LPSWEY address,lpswe
	ALTERNATIVE "b \lpswe; nopr", ".insn siy,0xeb0000000071,\address,0", 193
	.endm

	.macro	MBEAR reg
	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
	.endm

	.macro	CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,THREAD_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

	.macro	CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - THREAD_SIZE
	oill	%r14,STACK_INIT_OFFSET
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_MCCK_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm

/*
 * The TSTMSK macro generates a test-under-mask instruction by
 * calculating the memory offset for the specified mask value.
 * The mask value can be any constant. The macro shifts the mask
 * value to calculate the memory offset for the test-under-mask
 * instruction.
 */
	.macro	TSTMSK addr, mask, size=8, bytepos=0
	.if (\bytepos < \size) && (\mask >> 8)
	.if (\mask & 0xff)
	.error "Mask exceeds byte boundary"
	.endif
	TSTMSK	\addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
	.exitm
	.endif
	.ifeq \mask
	.error "Mask must not be zero"
	.endif
	off = \size - \bytepos - 1
	tm	off+\addr, \mask
	.endm
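
/*
 * For example, with the default size of 8 and a mask that fits into the
 * lowest byte of the field (as _CIF_SIE does, see the "ni __LC_CPU_FLAGS+7"
 * uses below):
 *	TSTMSK	__LC_CPU_FLAGS,_CIF_SIE
 * expands to the single instruction
 *	tm	7+__LC_CPU_FLAGS,_CIF_SIE
 */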

	.macro BPOFF
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", 82
	.endm

	.macro BPON
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", 82
	.endm

	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
		    "j .+12; nop; nop", 82
	.endm

	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8;  .insn rrf,0xb2e80000,0,0,12,0", \
		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82
	.endm
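
/*
 * The branch prediction macros above are nops unless facility 82 is
 * installed; with it they emit PPA (perform-processor-assist, opcode
 * 0xb2e8) with function code 12 (BPOFF) or 13 (BPON). BPENTER and BPEXIT
 * switch prediction on or off depending on the TIF mask passed as
 * argument; all call sites in this file use _TIF_ISOLATE_BP_GUEST.
 */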

#if IS_ENABLED(CONFIG_KVM)
	.macro SIEEXIT sie_control
	lg	%r9,\sie_control		# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
	ni	__LC_CPU_FLAGS+7,255-_CIF_SIE
	larl	%r9,sie_exit			# skip forward to sie_exit
	.endm
#endif

	.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	brasl	%r14,stackleak_erase_on_task_stack
#endif
	.endm

	GEN_BR_THUNK %r14

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * The following nop exists only to prevent the next symbol from
	 * starting at the beginning of the kprobes text section. Otherwise
	 * several symbols would share the same address, and e.g. objdump
	 * would pick an arbitrary one of them when disassembling the code.
	 * With the nop in between this cannot happen.
	 */
	nop	0

/*
 * Scheduler resume function, called by __switch_to
 *  gpr2 = (task_struct *)prev
 *  gpr3 = (task_struct *)next
 * Returns:
 *  gpr2 = prev
 */
SYM_FUNC_START(__switch_to_asm)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT_OFFSET
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
	BR_EX	%r14
SYM_FUNC_END(__switch_to_asm)

#if IS_ENABLED(CONFIG_KVM)
/*
 * __sie64a calling convention:
 *  %r2 pointer to sie control block phys
 *  %r3 pointer to sie control block virt
 *  %r4 guest register save area
 */
SYM_FUNC_START(__sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL_PHYS(%r15)	# save sie block physical..
	stg	%r3,__SF_SIE_CONTROL(%r15)	# ...and virtual addresses
	stg	%r4,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15)	# reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12)	# copy thread flags
	lmg	%r0,%r13,0(%r4)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	oi	__LC_CPU_FLAGS+7,_CIF_SIE
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15)	# get sie block phys addr
	BPEXIT	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_entry:
	sie	0(%r14)
# Let the next instruction be NOP to avoid triggering a machine check
# and handling it in a guest as a result of the instruction execution.
	nopr	7
.Lsie_leave:
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_skip:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
	ni	__LC_CPU_FLAGS+7,255-_CIF_SIE
# Some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between __sie64a and sie_exit should not cause program
# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
SYM_FUNC_END(__sie64a)
EXPORT_SYMBOL(__sie64a)
EXPORT_SYMBOL(sie_exit)
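
/*
 * Note: the exit reason returned in %r2 is 0 for a regular SIE exit. It
 * is -EFAULT only if one of the EX_TABLE fixups above fired, i.e. a
 * program check was rewound into the nopr landing pad around the sie
 * instruction.
 */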
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
 */

SYM_CODE_START(system_call)
	stpt	__LC_SYS_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lghi	%r14,0
.Lsysc_per:
	STBEAR	__LC_LAST_BREAK
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r15,__LC_KERNEL_STACK
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r8,%r8
	xgr	%r9,%r9
	xgr	%r10,%r10
	xgr	%r11,%r11
	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
	MBEAR	%r2
	lgr	%r3,%r14
	brasl	%r14,__do_syscall
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPON
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(system_call)
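
/*
 * Note: the 0 loaded into %r14 above (and the 1 loaded in .Lpgm_svcper
 * below) is passed to __do_syscall as the second argument and tells the
 * C code whether the svc was single-stepped via PER.
 */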

#
# a new process exits the kernel with ret_from_fork
#
SYM_CODE_START(ret_from_fork)
	lgr	%r3,%r11
	brasl	%r14,__ret_from_fork
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPON
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(ret_from_fork)

/*
 * Program check handler routine
 */

SYM_CODE_START(pgm_check_handler)
	stpt	__LC_SYS_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lgr	%r10,%r15
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# coming from user space?
	jno	.Lpgm_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	j	3f			# -> fault in user space
.Lpgm_skip_asce:
1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	2f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single-stepped svc
2:	CHECK_STACK __LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 4f
	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
3:	lg	%r15,__LC_KERNEL_STACK
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
	stctg	%c1,%c1,__PT_CR1(%r11)
#if IS_ENABLED(CONFIG_KVM)
	ltg	%r12,__LC_GMAP
	jz	5f
	clc	__GMAP_ASCE(8,%r12),__PT_CR1(%r11)
	jne	5f
	BPENTER	__SF_SIE_FLAGS(%r10),_TIF_ISOLATE_BP_GUEST
	SIEEXIT	__SF_SIE_CONTROL(%r10)
#endif
5:	stmg	%r8,%r9,__PT_PSW(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	lgr	%r2,%r11
	brasl	%r14,__do_pgm_check
	tmhh	%r8,0x0001		# returning to user space?
	jno	.Lpgm_exit_kernel
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPON
	stpt	__LC_EXIT_TIMER
.Lpgm_exit_kernel:
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE

#
# single-stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,1
	LBEAR	__LC_PGM_LAST_BREAK
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE	# branch to .Lsysc_per
SYM_CODE_END(pgm_check_handler)

/*
 * Interrupt handler macro used for external and IO interrupts.
 */
.macro INT_HANDLER name,lc_old_psw,handler
SYM_CODE_START(\name)
	stckf	__LC_INT_CLOCK
	stpt	__LC_SYS_ENTER_TIMER
	STBEAR	__LC_LAST_BREAK
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lmg	%r8,%r9,\lc_old_psw
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	1f
#if IS_ENABLED(CONFIG_KVM)
	TSTMSK	__LC_CPU_FLAGS,_CIF_SIE
	jz	0f
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT	__SF_SIE_CONTROL(%r15)
#endif
0:	CHECK_STACK __LC_SAVE_AREA_ASYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r15,__LC_KERNEL_STACK
2:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	MBEAR	%r11
	stmg	%r8,%r9,__PT_PSW(%r11)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,\handler
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tmhh	%r8,0x0001		# returning to user ?
	jno	2f
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPON
	stpt	__LC_EXIT_TIMER
2:	LBEAR	__PT_LAST_BREAK(%r11)
	lmg	%r0,%r15,__PT_R0(%r11)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(\name)
.endm

INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
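
/*
 * Note: both generated handlers save the breaking-event address on entry
 * (STBEAR to lowcore, MBEAR into pt_regs) and restore it on exit (LBEAR),
 * so taking the interrupt does not clobber the interrupted context's
 * last-break value.
 */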

/*
 * Machine check handler routines
 */
SYM_CODE_START(mcck_int_handler)
	BPOFF
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	ptlb
	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
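	# The CPU timer contents are not valid: reload the CPU timer with
	# the most recently saved timer value. The CPU timer counts down,
	# so the smallest of the values compared below should be the most
	# recent one.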
	la	%r14,__LC_SYS_ENTER_TIMER
	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	.Lmcck_user
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
	TSTMSK	__LC_CPU_FLAGS,_CIF_SIE
	jz	.Lmcck_user
	# Need to compare the address instead of a CIF_SIE* flag.
	# Otherwise there would be a race between setting the flag
	# and entering SIE (or leaving and clearing the flag). This
	# would cause machine checks targeted at the guest to be
	# handled by the host.
	larl	%r14,.Lsie_entry
	clgrjl	%r9,%r14,4f
	larl	%r14,.Lsie_leave
	clgrjhe	%r9,%r14,4f
	oi	__LC_CPU_FLAGS+7,_CIF_MCCK_GUEST
4:	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT	__SF_SIE_CONTROL(%r15)
#endif
.Lmcck_user:
	lg	%r15,__LC_MCCK_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stctg	%c1,%c1,__PT_CR1(%r11)
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	lctlg	%c1,%c1,__PT_CR1(%r11)
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11)	# move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01	# returning to user ?
	jno	0f
	BPON
	stpt	__LC_EXIT_TIMER
	# %r12 is only used by LBEAR if facility 193 is installed
0:	ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA), 193
	LBEAR	0(%r12)
	lmg	%r11,%r15,__PT_R11(%r11)
	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	/*
	 * Iterate over all possible CPU addresses in the range 0..0xffff
	 * and stop each CPU using signal processor. Use compare and swap
	 * to allow just one CPU-stopper and prevent concurrent CPUs from
	 * stopping each other while leaving the others running.
	 */
	lhi	%r5,0
	lhi	%r6,1
	larl	%r7,stop_lock
	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
	jnz	4f
	larl	%r7,this_cpu
	stap	0(%r7)			# this CPU address
	lh	%r4,0(%r7)
	nilh	%r4,0
	lhi	%r0,1
	sll	%r0,16			# CPU counter
	lhi	%r3,0			# next CPU address
0:	cr	%r3,%r4
	je	2f
1:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
	brc	SIGP_CC_BUSY,1b
2:	ahi	%r3,1
	brct	%r0,0b
3:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
	brc	SIGP_CC_BUSY,3b
4:	j	4b
SYM_CODE_END(mcck_int_handler)

SYM_CODE_START(restart_int_handler)
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
	stg	%r15,__LC_SAVE_AREA_RESTART
	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
	jz	0f
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA
0:	larl	%r15,daton_psw
	lpswe	0(%r15)				# turn dat on, keep irqs off
.Ldaton:
	lg	%r15,__LC_RESTART_STACK
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lgf	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
SYM_CODE_END(restart_int_handler)

	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
SYM_CODE_START(stack_overflow)
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11)	# store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
SYM_CODE_END(stack_overflow)
#endif

	.section .data, "aw"
	.balign	4
SYM_DATA_LOCAL(stop_lock, .long 0)
SYM_DATA_LOCAL(this_cpu, .short 0)
	.balign	8
SYM_DATA_START_LOCAL(daton_psw)
	.quad	PSW_KERNEL_BITS
	.quad	.Ldaton
SYM_DATA_END(daton_psw)

	.section .rodata, "a"
	.balign	8
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
SYM_DATA_START(sys_call_table)
#include "asm/syscall_table.h"
SYM_DATA_END(sys_call_table)
#undef SYSCALL

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.quad __s390_ ## emu
SYM_DATA_START(sys_call_table_emu)
#include "asm/syscall_table.h"
SYM_DATA_END(sys_call_table_emu)
#undef SYSCALL
#endif