/* SPDX-License-Identifier: GPL-2.0 */
/*
 * S390 low-level entry points.
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Hartmut Penner (hp@de.ibm.com),
 *	      Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-extable.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/fpu-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/nospec-insn.h>

_LPP_OFFSET	= __LC_LPP

	.macro STBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", 193
	.endm

	.macro LBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", 193
	.endm

	.macro LPSWEY address,lpswe
	ALTERNATIVE "b \lpswe; nopr", ".insn siy,0xeb0000000071,\address,0", 193
	.endm

	.macro MBEAR reg
	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
	.endm

	.macro CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,THREAD_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

	.macro CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - THREAD_SIZE
	oill	%r14,STACK_INIT_OFFSET
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_MCCK_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm

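/*
 * Illustrative CHECK_STACK expansion, not generated code. Assuming
 * THREAD_SIZE = 16KB and CONFIG_STACK_GUARD = 256 (both are config
 * dependent), the CHECK_STACK macro above tests
 *	tml	%r15,0x4000 - 0x100		# i.e. mask 0x3f00
 * which is zero exactly when %r15 modulo THREAD_SIZE lies below the
 * guard area, i.e. the stack pointer has run into the guard region,
 * and in that case branches to stack_overflow with the save area
 * offset in %r14.
 */
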
/*
 * The TSTMSK macro generates a test-under-mask instruction by
 * calculating the memory offset for the specified mask value.
 * Mask value can be any constant. The macro shifts the mask
 * value to calculate the memory offset for the test-under-mask
 * instruction.
 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
	.if (\bytepos < \size) && (\mask >> 8)
	.if (\mask & 0xff)
	.error "Mask exceeds byte boundary"
	.endif
	TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
	.exitm
	.endif
	.ifeq \mask
	.error "Mask must not be zero"
	.endif
	off = \size - \bytepos - 1
	tm	off+\addr, \mask
	.endm

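/*
 * Illustrative TSTMSK expansion, not generated code. Assuming _CIF_SIE
 * fits into the lowest byte of the 8-byte __LC_CPU_FLAGS field,
 *	TSTMSK	__LC_CPU_FLAGS,_CIF_SIE
 * does not recurse (the mask shifted right by 8 is zero) and emits the
 * single instruction
 *	tm	7+__LC_CPU_FLAGS,_CIF_SIE
 * which matches the manual "+7" byte offsets used with oi/ni below.
 */
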
	.macro BPOFF
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", 82
	.endm

	.macro BPON
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", 82
	.endm

	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
		    "j .+12; nop; nop", 82
	.endm

	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8;  .insn rrf,0xb2e80000,0,0,12,0", \
		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82
	.endm

#if IS_ENABLED(CONFIG_KVM)
	.macro SIEEXIT sie_control
	lg	%r9,\sie_control		# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
	ni	__LC_CPU_FLAGS+7,255-_CIF_SIE
	larl	%r9,sie_exit			# skip forward to sie_exit
	.endm
#endif

	.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	brasl	%r14,stackleak_erase_on_task_stack
#endif
	.endm

	GEN_BR_THUNK %r14

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * The following nop exists only to avoid having the next symbol
	 * start at the beginning of the kprobes text section. In that
	 * case there would be several symbols at the same address, and
	 * objdump, for example, would pick an arbitrary one of them when
	 * disassembling the code. The nop in between prevents this.
	 */
	nop	0

/*
 * Scheduler resume function, called by __switch_to
 *  gpr2 = (task_struct *)prev
 *  gpr3 = (task_struct *)next
 * Returns:
 *  gpr2 = prev
 */
SYM_FUNC_START(__switch_to_asm)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT_OFFSET
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
	BR_EX	%r14
SYM_FUNC_END(__switch_to_asm)

#if IS_ENABLED(CONFIG_KVM)
/*
 * __sie64a calling convention:
 * %r2 pointer to sie control block phys
 * %r3 pointer to sie control block virt
 * %r4 guest register save area
 * %r5 guest asce
 */
SYM_FUNC_START(__sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL_PHYS(%r15)	# save sie block physical..
	stg	%r3,__SF_SIE_CONTROL(%r15)	# ...and virtual addresses
	stg	%r4,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	stg	%r5,__SF_SIE_GUEST_ASCE(%r15)	# save guest asce
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	lmg	%r0,%r13,0(%r4)			# load guest gprs 0-13
	oi	__LC_CPU_FLAGS+7,_CIF_SIE
	lctlg	%c1,%c1,__SF_SIE_GUEST_ASCE(%r15) # load primary asce
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15)	# get sie block phys addr
	BPEXIT	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_entry:
	sie	0(%r14)
# Let the next instruction be NOP to avoid triggering a machine check
# and handling it in a guest as a result of the instruction execution.
	nopr	7
.Lsie_leave:
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_skip:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
	ni	__LC_CPU_FLAGS+7,255-_CIF_SIE
# Some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between __sie64a and sie_exit should not cause program
# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
SYM_FUNC_END(__sie64a)
EXPORT_SYMBOL(__sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

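/*
 * Illustrative C-side sketch of the calling convention above; this is
 * an assumption about the shape of the prototype, not the declaration
 * KVM actually uses:
 *	int __sie64a(u64 sie_block_phys, void *sie_block_virt,
 *		     u64 *guest_gprs, unsigned long guest_asce);
 * The return value is the exit reason code from __SF_SIE_REASON, which
 * is zeroed on entry and set to -EFAULT on the .Lsie_fault path.
 */
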
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
 */

SYM_CODE_START(system_call)
	stpt	__LC_SYS_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lghi	%r14,0
.Lsysc_per:
	STBEAR	__LC_LAST_BREAK
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r15,__LC_KERNEL_STACK
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r8,%r8
	xgr	%r9,%r9
	xgr	%r10,%r10
	xgr	%r11,%r11
	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
	MBEAR	%r2
	lgr	%r3,%r14
	brasl	%r14,__do_syscall
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPON
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(system_call)

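#
# Illustrative only: the C entry point invoked by system_call above is
# assumed to have the shape
#	void __do_syscall(struct pt_regs *regs, int per_trap)
# with %r2 carrying the pt_regs pointer and %r3 the per_trap flag,
# which is 0 on the normal path and 1 when entered via .Lsysc_per
# (single stepped svc, see pgm_check_handler below).
#
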
#
# a new process exits the kernel with ret_from_fork
#
SYM_CODE_START(ret_from_fork)
	lgr	%r3,%r11
	brasl	%r14,__ret_from_fork
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPON
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(ret_from_fork)

/*
 * Program check handler routine
 */

SYM_CODE_START(pgm_check_handler)
	stpt	__LC_SYS_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lgr	%r10,%r15
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# coming from user space?
	jno	.Lpgm_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	j	3f			# -> fault in user space
.Lpgm_skip_asce:
1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	2f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
2:	CHECK_STACK __LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 4f
	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
3:	lg	%r15,__LC_KERNEL_STACK
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
	stctg	%c1,%c1,__PT_CR1(%r11)
#if IS_ENABLED(CONFIG_KVM)
	ltg	%r12,__LC_GMAP
	jz	5f
	clc	__GMAP_ASCE(8,%r12), __PT_CR1(%r11)
	jne	5f
	BPENTER	__SF_SIE_FLAGS(%r10),_TIF_ISOLATE_BP_GUEST
	SIEEXIT __SF_SIE_CONTROL(%r10)
#endif
5:	stmg	%r8,%r9,__PT_PSW(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	lgr	%r2,%r11
	brasl	%r14,__do_pgm_check
	tmhh	%r8,0x0001		# returning to user space?
	jno	.Lpgm_exit_kernel
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPON
	stpt	__LC_EXIT_TIMER
.Lpgm_exit_kernel:
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,1
	LBEAR	__LC_PGM_LAST_BREAK
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
SYM_CODE_END(pgm_check_handler)

/*
 * Interrupt handler macro used for external and IO interrupts.
 */
.macro INT_HANDLER name,lc_old_psw,handler
SYM_CODE_START(\name)
	stckf	__LC_INT_CLOCK
	stpt	__LC_SYS_ENTER_TIMER
	STBEAR	__LC_LAST_BREAK
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lmg	%r8,%r9,\lc_old_psw
	tmhh	%r8,0x0001			# interrupting from user ?
	jnz	1f
#if IS_ENABLED(CONFIG_KVM)
	TSTMSK	__LC_CPU_FLAGS,_CIF_SIE
	jz	0f
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT __SF_SIE_CONTROL(%r15)
#endif
0:	CHECK_STACK __LC_SAVE_AREA_ASYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r15,__LC_KERNEL_STACK
2:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	MBEAR	%r11
	stmg	%r8,%r9,__PT_PSW(%r11)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,\handler
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tmhh	%r8,0x0001		# returning to user ?
	jno	2f
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPON
	stpt	__LC_EXIT_TIMER
2:	LBEAR	__PT_LAST_BREAK(%r11)
	lmg	%r0,%r15,__PT_R0(%r11)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(\name)
.endm

INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq

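#
# Illustrative only: each INT_HANDLER instantiation above stamps out a
# complete copy of the template with \name, \lc_old_psw and \handler
# substituted, so ext_int_handler saves the old PSW from
# __LC_EXT_OLD_PSW and calls do_ext_irq with the pt_regs pointer in
# %r2, while io_int_handler does the same with __LC_IO_OLD_PSW and
# do_io_irq.
#
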
/*
 * Machine check handler routines
 */
SYM_CODE_START(mcck_int_handler)
	BPOFF
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	ptlb
	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	la	%r14,__LC_SYS_ENTER_TIMER
	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	.Lmcck_user
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
	TSTMSK	__LC_CPU_FLAGS,_CIF_SIE
	jz	.Lmcck_user
	# Need to compare the address instead of a CIF_SIE* flag.
	# Otherwise there would be a race between setting the flag
	# and entering SIE (or leaving and clearing the flag). This
	# would cause machine checks targeted at the guest to be
	# handled by the host.
	larl	%r14,.Lsie_entry
	clgrjl	%r9,%r14, 4f
	larl	%r14,.Lsie_leave
	clgrjhe	%r9,%r14, 4f
	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
4:	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT __SF_SIE_CONTROL(%r15)
#endif
.Lmcck_user:
	lg	%r15,__LC_MCCK_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stctg	%c1,%c1,__PT_CR1(%r11)
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	lctlg	%c1,%c1,__PT_CR1(%r11)
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01	# returning to user ?
	jno	0f
	BPON
	stpt	__LC_EXIT_TIMER
0:	ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
	LBEAR	0(%r12)
	lmg	%r11,%r15,__PT_R11(%r11)
	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE

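#
# Illustrative reading of the KVM range check above, not extra code:
# %r9 holds the instruction address from the machine check old PSW, so
#	.Lsie_entry <= %r9 < .Lsie_leave
# means the machine check hit while the CPU was executing the sie
# instruction, and _CIF_MCCK_GUEST marks it for delivery to the guest.
#
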
.Lmcck_panic:
	/*
	 * Iterate over all possible CPU addresses in the range 0..0xffff
	 * and stop each CPU using signal processor. Use compare and swap
	 * to allow just one CPU-stopper and prevent concurrent CPUs from
	 * stopping each other while leaving the others running.
	 */
	lhi	%r5,0
	lhi	%r6,1
	larl	%r7,stop_lock
	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
	jnz	4f
	larl	%r7,this_cpu
	stap	0(%r7)			# this CPU address
	lh	%r4,0(%r7)
	nilh	%r4,0
	lhi	%r0,1
	sll	%r0,16			# CPU counter
	lhi	%r3,0			# next CPU address
0:	cr	%r3,%r4
	je	2f
1:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
	brc	SIGP_CC_BUSY,1b
2:	ahi	%r3,1
	brct	%r0,0b
3:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
	brc	SIGP_CC_BUSY,3b
4:	j	4b
SYM_CODE_END(mcck_int_handler)

SYM_CODE_START(restart_int_handler)
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
	stg	%r15,__LC_SAVE_AREA_RESTART
	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
	jz	0f
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA
0:	larl	%r15,daton_psw
	lpswe	0(%r15)				# turn dat on, keep irqs off
.Ldaton:
	lg	%r15,__LC_RESTART_STACK
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lgf	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
SYM_CODE_END(restart_int_handler)

	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
SYM_CODE_START(stack_overflow)
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
SYM_CODE_END(stack_overflow)
#endif

	.section .data, "aw"
	.balign	4
SYM_DATA_LOCAL(stop_lock,	.long 0)
SYM_DATA_LOCAL(this_cpu,	.short 0)
	.balign	8
SYM_DATA_START_LOCAL(daton_psw)
	.quad	PSW_KERNEL_BITS
	.quad	.Ldaton
SYM_DATA_END(daton_psw)

	.section .rodata, "a"
	.balign	8
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
SYM_DATA_START(sys_call_table)
#include "asm/syscall_table.h"
SYM_DATA_END(sys_call_table)
#undef SYSCALL

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.quad __s390_ ## emu
SYM_DATA_START(sys_call_table_emu)
#include "asm/syscall_table.h"
SYM_DATA_END(sys_call_table_emu)
#undef SYSCALL
#endif

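#
# Illustrative only: asm/syscall_table.h is a generated list of
# SYSCALL(esame,emu) entries. A hypothetical entry such as
#	SYSCALL(sys_read,compat_sys_read)
# would emit ".quad __s390x_sys_read" into sys_call_table above and
# ".quad __s390_compat_sys_read" into sys_call_table_emu when
# CONFIG_COMPAT is enabled.
#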