/* SPDX-License-Identifier: GPL-2.0 */
/*
 *    S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-extable.h>
#include <asm/alternative.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/bug.h>
#include <asm/irq.h>
#include <asm/fpu-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/nospec-insn.h>
#include <asm/lowcore.h>
#include <asm/machine.h>

_LPP_OFFSET	= __LC_LPP

	.macro STBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", ALT_FACILITY(193)
	.endm

	.macro LBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", ALT_FACILITY(193)
	.endm

	.macro LPSWEY address, lpswe
	ALTERNATIVE_2 "b \lpswe;nopr", \
		".insn siy,0xeb0000000071,\address,0", ALT_FACILITY(193), \
		__stringify(.insn siy,0xeb0000000071,LOWCORE_ALT_ADDRESS+\address,0), \
		ALT_FEATURE(MFEATURE_LOWCORE)
	.endm

	.macro MBEAR reg, lowcore
	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK(\lowcore)),\
		ALT_FACILITY(193)
	.endm

	.macro CHECK_VMAP_STACK savearea, lowcore, oklabel
	lgr	%r14,%r15
	nill	%r14,0x10000 - THREAD_SIZE
	oill	%r14,STACK_INIT_OFFSET
	clg	%r14,__LC_KERNEL_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_MCCK_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK(\lowcore)
	je	\oklabel
	la	%r14,\savearea(\lowcore)
	j	stack_invalid
	.endm

/*
 * The TSTMSK macro generates a test-under-mask instruction by
 * calculating the memory offset for the specified mask value.
 * The mask value can be any constant. The macro shifts the mask
 * value to calculate the memory offset for the test-under-mask
 * instruction.
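 *
 * For example, with the default size of eight bytes a mask of 0x0100
 * recurses once (mask >> 8, bytepos + 1) and ends up as
 * "tm 6+<addr>,0x01": byte 6 is the second-lowest byte of the
 * big-endian doubleword, which is the one byte that actually
 * contains the mask bit.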
 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
	.if (\bytepos < \size) && (\mask >> 8)
	.if (\mask & 0xff)
	.error "Mask exceeds byte boundary"
	.endif
	TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
	.exitm
	.endif
	.ifeq \mask
	.error "Mask must not be zero"
	.endif
	off = \size - \bytepos - 1
	tm	off+\addr, \mask
	.endm

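/*
 * BPOFF and BPON disable and re-enable branch prediction using the
 * "perform processor assist" instruction (opcode 0xb2e8). BPENTER and
 * BPEXIT do the same depending on whether _TIF_ISOLATE_BP_GUEST is set
 * for the current task. All variants are only patched in if the
 * ALT_SPEC(82) alternative applies.
 */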
	.macro BPOFF
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", ALT_SPEC(82)
	.endm

	.macro BPON
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)
	.endm

	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
		    "j .+12; nop; nop", ALT_SPEC(82)
	.endm

	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8; .insn rrf,0xb2e80000,0,0,12,0", \
		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)
	.endm

#if IS_ENABLED(CONFIG_KVM)
	.macro SIEEXIT sie_control,lowcore
	lg	%r9,\sie_control		# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE(\lowcore) # load primary asce
	lg	%r9,__LC_CURRENT(\lowcore)
	mvi	__TI_sie(%r9),0
	larl	%r9,sie_exit			# skip forward to sie_exit
	.endm
#endif

	.macro STACKLEAK_ERASE
#ifdef CONFIG_KSTACK_ERASE
	brasl	%r14,stackleak_erase_on_task_stack
#endif
	.endm

	GEN_BR_THUNK %r14

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * The following nop exists only to prevent the next symbol from
	 * starting at the beginning of the kprobes text section. In that
	 * case there would be several symbols at the same address, and
	 * e.g. objdump would pick an arbitrary one of them when
	 * disassembling the code. With the added nop in between this
	 * cannot happen.
	 */
	nop	0

/*
 * Scheduler resume function, called by __switch_to
 *  gpr2 = (task_struct *)prev
 *  gpr3 = (task_struct *)next
 * Returns:
 *  gpr2 = prev
 */
SYM_FUNC_START(__switch_to_asm)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT_OFFSET
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	GET_LC	%r13
	stg	%r3,__LC_CURRENT(%r13)		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK(%r13)	# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	lay	%r4,__TASK_pid(%r3)
	mvc	__LC_CURRENT_PID(4,%r13),0(%r4)	# store pid of next
	ALTERNATIVE "nop", "lpp _LPP_OFFSET(%r13)", ALT_FACILITY(40)
#ifdef CONFIG_STACKPROTECTOR
	lg	%r3,__TASK_stack_canary(%r3)
	stg	%r3,__LC_STACK_CANARY(%r13)
#endif
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	BR_EX	%r14
SYM_FUNC_END(__switch_to_asm)

#if defined(CONFIG_BUG) && defined(CONFIG_CC_HAS_ASM_IMMEDIATE_STRINGS)

SYM_FUNC_START(__WARN_trap)
	mc	MONCODE_BUG_ARG(%r0),0
	BR_EX	%r14
SYM_FUNC_END(__WARN_trap)
EXPORT_SYMBOL(__WARN_trap)

#endif /* CONFIG_BUG && CONFIG_CC_HAS_ASM_IMMEDIATE_STRINGS */

#if IS_ENABLED(CONFIG_KVM)
/*
 * __sie64a calling convention:
 * %r2 pointer to sie control block phys
 * %r3 pointer to sie control block virt
 * %r4 guest register save area
 * %r5 guest asce
 */
SYM_FUNC_START(__sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	GET_LC	%r13
	lg	%r14,__LC_CURRENT(%r13)
	stg	%r2,__SF_SIE_CONTROL_PHYS(%r15)	# save sie block physical..
	stg	%r3,__SF_SIE_CONTROL(%r15)	# ...and virtual addresses
	stg	%r4,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	stg	%r5,__SF_SIE_GUEST_ASCE(%r15)	# save guest asce
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r14) # copy thread flags
	lmg	%r0,%r13,0(%r4)			# load guest gprs 0-13
	mvi	__TI_sie(%r14),1
	stosm	__SF_SIE_IRQ(%r15),0x03		# enable interrupts
	lctlg	%c1,%c1,__SF_SIE_GUEST_ASCE(%r15) # load primary asce
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15) # get sie block phys addr
	BPEXIT	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_entry:
	sie	0(%r14)
# Let the next instruction be NOP to avoid triggering a machine check
# and handling it in a guest as a result of the instruction execution.
	nopr	7
.Lsie_leave:
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_skip:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	GET_LC	%r14
	lctlg	%c1,%c1,__LC_USER_ASCE(%r14)	# load primary asce
	lg	%r14,__LC_CURRENT(%r14)
	mvi	__TI_sie(%r14),0
SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
	stnsm	__SF_SIE_IRQ(%r15),0xfc		# disable interrupts
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
SYM_FUNC_END(__sie64a)
EXPORT_SYMBOL(__sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
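 *
 * On entry %r14 is set to zero; the single-stepped system call path
 * (.Lpgm_svcper below) re-enters at .Lsysc_per with %r14 set to one.
 * That value is passed to __do_syscall as its second argument.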
 */

SYM_CODE_START(system_call)
	STMG_LC	%r8,%r15,__LC_SAVE_AREA
	GET_LC	%r13
	stpt	__LC_SYS_ENTER_TIMER(%r13)
	BPOFF
	lghi	%r14,0
.Lsysc_per:
	STBEAR	__LC_LAST_BREAK(%r13)
	lg	%r15,__LC_KERNEL_STACK(%r13)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r8,%r8
	xgr	%r9,%r9
	xgr	%r10,%r10
	xgr	%r11,%r11
	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA(%r13)
	MBEAR	%r2,%r13
	lgr	%r3,%r14
	brasl	%r14,__do_syscall
	STACKLEAK_ERASE
	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPON
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	stpt	__LC_EXIT_TIMER(%r13)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(system_call)

#
# a new process exits the kernel with ret_from_fork
#
SYM_CODE_START(ret_from_fork)
	lgr	%r3,%r11
	brasl	%r14,__ret_from_fork
	STACKLEAK_ERASE
	GET_LC	%r13
	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPON
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	stpt	__LC_EXIT_TIMER(%r13)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(ret_from_fork)

/*
 * Program check handler routine
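 *
 * Program checks that hit while the CPU is running a KVM guest first
 * leave SIE (SIEEXIT) and mark the pt_regs with _PIF_GUEST_FAULT. A
 * PER event on a system call instruction is redirected to .Lpgm_svcper
 * so that it is handled like a single-stepped svc.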
 */

SYM_CODE_START(pgm_check_handler)
	STMG_LC	%r8,%r15,__LC_SAVE_AREA
	GET_LC	%r13
	stpt	__LC_SYS_ENTER_TIMER(%r13)
	BPOFF
	lmg	%r8,%r9,__LC_PGM_OLD_PSW(%r13)
	xgr	%r10,%r10
	tmhh	%r8,0x0001			# coming from user space?
	jo	3f				# -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
	lg	%r11,__LC_CURRENT(%r13)
	tm	__TI_sie(%r11),0xff
	jz	1f
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT	__SF_SIE_CONTROL(%r15),%r13
	lghi	%r10,_PIF_GUEST_FAULT
#endif
1:	tmhh	%r8,0x4000			# PER bit set in old PSW ?
	jnz	2f				# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3(%r13),0x80	# check for per exception
	jnz	.Lpgm_svcper			# -> single stepped svc
2:	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_invalid or 4f
	CHECK_VMAP_STACK __LC_SAVE_AREA,%r13,4f
3:	lg	%r15,__LC_KERNEL_STACK(%r13)
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stg	%r10,__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK(%r13)
	stmg	%r8,%r9,__PT_PSW(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r12,%r12
	lgr	%r2,%r11
	brasl	%r14,__do_pgm_check
	tmhh	%r8,0x0001			# returning to user space?
	jno	.Lpgm_exit_kernel
	STACKLEAK_ERASE
	BPON
	stpt	__LC_EXIT_TIMER(%r13)
.Lpgm_exit_kernel:
	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8,%r13),__LC_SVC_NEW_PSW(%r13)
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8(%r13)
	lghi	%r14,1
	LBEAR	__LC_PGM_LAST_BREAK(%r13)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
SYM_CODE_END(pgm_check_handler)

/*
 * Interrupt handler macro used for external and IO interrupts.
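 * The macro is instantiated twice below: once for external interrupts
 * (ext_int_handler -> do_ext_irq) and once for I/O interrupts
 * (io_int_handler -> do_io_irq). \lc_old_psw selects the lowcore old
 * PSW that matches the interruption class.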
 */
.macro INT_HANDLER name,lc_old_psw,handler
SYM_CODE_START(\name)
	STMG_LC	%r8,%r15,__LC_SAVE_AREA
	GET_LC	%r13
	stckf	__LC_INT_CLOCK(%r13)
	stpt	__LC_SYS_ENTER_TIMER(%r13)
	STBEAR	__LC_LAST_BREAK(%r13)
	BPOFF
	lmg	%r8,%r9,\lc_old_psw(%r13)
	tmhh	%r8,0x0001			# interrupting from user ?
	jnz	1f
#if IS_ENABLED(CONFIG_KVM)
	lg	%r10,__LC_CURRENT(%r13)
	tm	__TI_sie(%r10),0xff
	jz	0f
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT	__SF_SIE_CONTROL(%r15),%r13
#endif
0:	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	lg	%r15,__LC_KERNEL_STACK(%r13)
2:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
	MBEAR	%r11,%r13
	stmg	%r8,%r9,__PT_PSW(%r11)
	lgr	%r2,%r11			# pass pointer to pt_regs
	brasl	%r14,\handler
	mvc	__LC_RETURN_PSW(16,%r13),__PT_PSW(%r11)
	tmhh	%r8,0x0001			# returning to user ?
	jno	2f
	STACKLEAK_ERASE
	BPON
	stpt	__LC_EXIT_TIMER(%r13)
2:	LBEAR	__PT_LAST_BREAK(%r11)
	lmg	%r0,%r15,__PT_R0(%r11)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(\name)
.endm

	.section .irqentry.text, "ax"

INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq

	.section .kprobes.text, "ax"

/*
 * Machine check handler routines
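 *
 * The machine check code in the lowcore is inspected first: if the
 * system damage bit is set or the control registers are invalid,
 * nothing can be trusted and all CPUs are stopped (.Lmcck_panic). An
 * invalid CPU timer is replaced with the most plausible of the saved
 * timer values before the interrupted context is examined.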
 */
SYM_CODE_START(mcck_int_handler)
	BPOFF
	GET_LC	%r13
	lmg	%r8,%r9,__LC_MCK_OLD_PSW(%r13)
	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic			# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_CR_VALID
	jno	.Lmcck_panic			# control registers invalid -> panic
	ptlb
	lay	%r14,__LC_CPU_TIMER_SAVE_AREA(%r13)
	mvc	__LC_MCCK_ENTER_TIMER(8,%r13),0(%r14)
	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	la	%r14,__LC_SYS_ENTER_TIMER(%r13)
	clc	0(8,%r14),__LC_EXIT_TIMER(%r13)
	jl	1f
	la	%r14,__LC_EXIT_TIMER(%r13)
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER(%r13)
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER(%r13)
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8,%r13),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001			# interrupting from user ?
	jnz	.Lmcck_user
	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
	lg	%r10,__LC_CURRENT(%r13)
	tm	__TI_sie(%r10),0xff
	jz	.Lmcck_user
	# Need to compare the address instead of the __TI_sie flag.
	# Otherwise there would be a race between setting the flag
	# and entering SIE (or leaving and clearing the flag). This
	# would cause machine checks targeted at the guest to be
	# handled by the host.
	larl	%r14,.Lsie_entry
	clgrjl	%r9,%r14,4f
	larl	%r14,.Lsie_leave
	clgrjhe	%r9,%r14,4f
	lg	%r10,__LC_PCPU(%r13)
	oi	__PCPU_FLAGS+7(%r10),_CIF_MCCK_GUEST
4:	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT	__SF_SIE_CONTROL(%r15),%r13
#endif
.Lmcck_user:
	lg	%r15,__LC_MCCK_STACK(%r13)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lay	%r14,__LC_GPREGS_SAVE_AREA(%r13)
	mvc	__PT_R0(128,%r11),0(%r14)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11			# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16,%r13),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1(%r13),0x01 # returning to user ?
	jno	0f
	BPON
	stpt	__LC_EXIT_TIMER(%r13)
0:	ALTERNATIVE "brcl 0,0", __stringify(lay %r12,__LC_LAST_BREAK_SAVE_AREA(%r13)),\
		ALT_FACILITY(193)
	LBEAR	0(%r12)
	lmg	%r11,%r15,__PT_R11(%r11)
	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	/*
	 * Iterate over all possible CPU addresses in the range 0..0xffff
	 * and stop each CPU using signal processor. Use compare and swap
	 * to allow just one CPU-stopper and prevent concurrent CPUs from
	 * stopping each other while leaving the others running.
	 */
	lhi	%r5,0
	lhi	%r6,1
	larl	%r7,stop_lock
	cs	%r5,%r6,0(%r7)			# single CPU-stopper only
	jnz	4f
	larl	%r7,this_cpu
	stap	0(%r7)				# this CPU address
	lh	%r4,0(%r7)
	nilh	%r4,0
	lhi	%r0,1
	sll	%r0,16				# CPU counter
	lhi	%r3,0				# next CPU address
0:	cr	%r3,%r4
	je	2f
1:	sigp	%r1,%r3,SIGP_STOP		# stop next CPU
	brc	SIGP_CC_BUSY,1b
2:	ahi	%r3,1
	brct	%r0,0b
3:	sigp	%r1,%r4,SIGP_STOP		# stop this CPU
	brc	SIGP_CC_BUSY,3b
4:	j	4b
SYM_CODE_END(mcck_int_handler)

SYM_CODE_START(restart_int_handler)
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", ALT_FACILITY(40)
	stg	%r15,__LC_SAVE_AREA_RESTART
	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
	jz	0f
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA
0:	larl	%r15,daton_psw
	lpswe	0(%r15)				# turn dat on, keep irqs off
.Ldaton:
	GET_LC	%r15
	lg	%r15,__LC_RESTART_STACK(%r15)
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	GET_LC	%r13
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART(%r13)
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW(%r13)
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN(%r13)	# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA(%r13)
	lgf	%r3,__LC_RESTART_SOURCE(%r13)
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
SYM_CODE_END(restart_int_handler)

	__INIT
SYM_CODE_START(early_pgm_check_handler)
	STMG_LC	%r8,%r15,__LC_SAVE_AREA
	GET_LC	%r13
	aghi	%r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_PSW(16,%r11),__LC_PGM_OLD_PSW(%r13)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
	lgr	%r2,%r11
	brasl	%r14,__do_early_pgm_check
	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(early_pgm_check_handler)
	__FINIT

	.section .kprobes.text, "ax"

/*
 * The synchronous or the asynchronous stack pointer is invalid. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
SYM_CODE_START(stack_invalid)
	GET_LC	%r15
	lg	%r15,__LC_NODAT_STACK(%r15)	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	GET_LC	%r2
	mvc	__PT_ORIG_GPR2(8,%r11),__LC_PGM_LAST_BREAK(%r2)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11			# pass pointer to pt_regs
	jg	kernel_stack_invalid
SYM_CODE_END(stack_invalid)

	.section .data, "aw"
	.balign	4
SYM_DATA_LOCAL(stop_lock, .long 0)
SYM_DATA_LOCAL(this_cpu, .short 0)
	.balign	8
SYM_DATA_START_LOCAL(daton_psw)
	.quad	PSW_KERNEL_BITS
	.quad	.Ldaton
SYM_DATA_END(daton_psw)