/* SPDX-License-Identifier: GPL-2.0 */
/*
 * S390 low-level entry points.
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Hartmut Penner (hp@de.ibm.com),
 *	      Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-extable.h>
#include <asm/alternative.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/bug.h>
#include <asm/irq.h>
#include <asm/fpu-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/nospec-insn.h>
#include <asm/lowcore.h>
#include <asm/machine.h>

_LPP_OFFSET	= __LC_LPP

	/*
	 * Store the breaking-event address at \address. Patched to a nop
	 * unless facility 193 is installed (hand-encoded STBEAR insn).
	 */
	.macro STBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", ALT_FACILITY(193)
	.endm

	/*
	 * Load the breaking-event address register from \address.
	 * Patched to a nop unless facility 193 is installed.
	 */
	.macro LBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", ALT_FACILITY(193)
	.endm

	/*
	 * Load a new PSW from \address. Without facility 193 this branches
	 * to the plain lpswe at \lpswe; with facility 193 a hand-encoded
	 * LPSWEY is used, against the alternate lowcore address when the
	 * MFEATURE_LOWCORE machine feature is set.
	 */
	.macro LPSWEY address, lpswe
	ALTERNATIVE_2 "b \lpswe;nopr", \
		".insn siy,0xeb0000000071,\address,0", ALT_FACILITY(193),	\
		__stringify(.insn siy,0xeb0000000071,LOWCORE_ALT_ADDRESS+\address,0), \
		ALT_FEATURE(MFEATURE_LOWCORE)
	.endm

	/*
	 * Copy the saved last-break address from the lowcore into the
	 * pt_regs area addressed by \reg. No-op unless facility 193 is
	 * installed (otherwise there is no last-break value to move).
	 */
	.macro MBEAR reg, lowcore
	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK(\lowcore)),\
		ALT_FACILITY(193)
	.endm

	/*
	 * Validate the current stack pointer: derive the stack base from
	 * %r15 (assumes THREAD_SIZE-aligned stacks — the nill/oill pair
	 * masks %r15 down to base + STACK_INIT_OFFSET) and compare it
	 * against every known per-cpu stack. Branch to \oklabel on a
	 * match; otherwise fall through with \savearea(\lowcore) in %r14
	 * for the invalid-stack path.
	 */
	.macro CHECK_VMAP_STACK savearea, lowcore, oklabel
	lgr	%r14,%r15
	nill	%r14,0x10000 - THREAD_SIZE
	oill	%r14,STACK_INIT_OFFSET
	clg	%r14,__LC_KERNEL_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_MCCK_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK(\lowcore)
	je	\oklabel
	la	%r14,\savearea(\lowcore)
	j	stack_invalid
	.endm

	/*
	 * The TSTMSK macro generates a test-under-mask instruction by
	 * calculating the memory offset for the specified mask value.
	 * Mask value can be any constant. The macro shifts the mask
	 * value to calculate the memory offset for the test-under-mask
	 * instruction.
	 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
		.if (\bytepos < \size) && (\mask >> 8)
			.if (\mask & 0xff)
				.error "Mask exceeds byte boundary"
			.endif
			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
			.exitm
		.endif
		.ifeq \mask
			.error "Mask must not be zero"
		.endif
		off = \size - \bytepos - 1
		tm	off+\addr, \mask
	.endm

	/*
	 * Branch prediction off: hand-encoded insn, only patched in when
	 * the ALT_SPEC(82) (spectre mitigation) alternative applies.
	 */
	.macro BPOFF
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", ALT_SPEC(82)
	.endm

	/* Branch prediction on: counterpart of BPOFF. */
	.macro BPON
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)
	.endm

	/*
	 * Re-enable branch prediction on kernel entry unless the task has
	 * \tif_mask set in the flags at \tif_ptr (isolated guest bp).
	 */
	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
		    "j .+12; nop; nop", ALT_SPEC(82)
	.endm

	/*
	 * On exit to the guest: disable or enable branch prediction
	 * depending on \tif_mask in the flags at \tif_ptr.
	 */
	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8; .insn rrf,0xb2e80000,0,0,12,0", \
		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)
	.endm

#if IS_ENABLED(CONFIG_KVM)
	/*
	 * Leave SIE context after an interrupt arrived while running a
	 * guest: clear the in-SIE bit in the control block and the
	 * thread's __TI_sie flag, restore the primary ASCE, and point
	 * %r9 at sie_exit so the interrupted PSW resumes there.
	 */
	.macro SIEEXIT sie_control,lowcore
	lg	%r9,\sie_control		# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE(\lowcore) # load primary asce
	lg	%r9,__LC_CURRENT(\lowcore)
	mvi	__TI_sie(%r9),0
	larl	%r9,sie_exit			# skip forward to sie_exit
	.endm
#endif

	/* Erase the used part of the kernel stack, if configured. */
	.macro STACKLEAK_ERASE
#ifdef CONFIG_KSTACK_ERASE
	brasl	%r14,stackleak_erase_on_task_stack
#endif
	.endm

	GEN_BR_THUNK %r14

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * The following nop exists only in order to avoid that the next
	 * symbol starts at the beginning of the kprobes text section.
	 * In that case there would be several symbols at the same address.
	 * E.g. objdump would take an arbitrary symbol when disassembling
	 * the code.
	 * With the added nop in between this cannot happen.
	 */
	nop	0

/*
 * Scheduler resume function, called by __switch_to
 *  gpr2 = (task_struct *)prev
 *  gpr3 = (task_struct *)next
 * Returns:
 *  gpr2 = prev
 */
SYM_FUNC_START(__switch_to_asm)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT_OFFSET
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	GET_LC	%r13
	stg	%r3,__LC_CURRENT(%r13)		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK(%r13)	# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	lay	%r4,__TASK_pid(%r3)
	mvc	__LC_CURRENT_PID(4,%r13),0(%r4)	# store pid of next
	ALTERNATIVE "nop", "lpp _LPP_OFFSET(%r13)", ALT_FACILITY(40)
#ifdef CONFIG_STACKPROTECTOR
	# mirror the next task's stack canary into the lowcore
	lg	%r3,__TASK_stack_canary(%r3)
	stg	%r3,__LC_STACK_CANARY(%r13)
#endif
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	BR_EX	%r14
SYM_FUNC_END(__switch_to_asm)

#if defined(CONFIG_BUG) && defined(CONFIG_CC_HAS_ASM_IMMEDIATE_STRINGS)

/*
 * Trigger a WARN via a monitor call; the monitor code carries the
 * bug table argument (MONCODE_BUG_ARG). Returns to the caller.
 */
SYM_FUNC_START(__WARN_trap)
	mc	MONCODE_BUG_ARG(%r0),0
	BR_EX	%r14
SYM_FUNC_END(__WARN_trap)
EXPORT_SYMBOL(__WARN_trap)

#endif /* CONFIG_BUG && CONFIG_CC_HAS_ASM_IMMEDIATE_STRINGS */

#if IS_ENABLED(CONFIG_KVM)
/*
 * __sie64a calling convention:
 * %r2 pointer to sie control block phys
 * %r3 pointer to sie control block virt
 * %r4 guest register save area
 * %r5 guest asce
 */
SYM_FUNC_START(__sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	GET_LC	%r13
	lg	%r14,__LC_CURRENT(%r13)
	stg	%r2,__SF_SIE_CONTROL_PHYS(%r15)	# save sie block physical..
	stg	%r3,__SF_SIE_CONTROL(%r15)	# ...and virtual addresses
	stg	%r4,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	stg	%r5,__SF_SIE_GUEST_ASCE(%r15)	# save guest asce
	xc	__SF_SIE_RETURN(8,%r15),__SF_SIE_RETURN(%r15) # return code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r14) # copy thread flags
	lmg	%r0,%r13,0(%r4)			# load guest gprs 0-13
	mvi	__TI_sie(%r14),1		# mark thread as running in SIE
	stosm	__SF_SIE_IRQ(%r15),0x03		# enable interrupts
	lctlg	%c1,%c1,__SF_SIE_GUEST_ASCE(%r15) # load primary asce
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15) # get sie block phys addr
	BPEXIT	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_entry:
	sie	0(%r14)
# Let the next instruction be NOP to avoid triggering a machine check
# and handling it in a guest as result of the instruction execution.
	nopr	7
.Lsie_leave:
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_skip:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	GET_LC	%r14
	lctlg	%c1,%c1,__LC_USER_ASCE(%r14)	# load primary asce
	lg	%r14,__LC_CURRENT(%r14)
	mvi	__TI_sie(%r14),0
SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
	stnsm	__SF_SIE_IRQ(%r15),0xfc		# disable interrupts
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_RETURN(%r15)	# return sie return code
	BR_EX	%r14
SYM_FUNC_END(__sie64a)
EXPORT_SYMBOL(__sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
 */

SYM_CODE_START(system_call)
	STMG_LC	%r8,%r15,__LC_SAVE_AREA
	GET_LC	%r13
	stpt	__LC_SYS_ENTER_TIMER(%r13)
	BPOFF
	lghi	%r14,0			# %r14 = 0: regular (not single-stepped) svc
.Lsysc_per:
	STBEAR	__LC_LAST_BREAK(%r13)
	lg	%r15,__LC_KERNEL_STACK(%r13)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	# clear user controlled register to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r8,%r8
	xgr	%r9,%r9
	xgr	%r10,%r10
	xgr	%r11,%r11
	xgr	%r12,%r12
	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA(%r13)
	MBEAR	%r2,%r13
	lgr	%r3,%r14		# second argument: per flag (see above)
	brasl	%r14,__do_syscall
	STACKLEAK_ERASE
	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPON
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	stpt	__LC_EXIT_TIMER(%r13)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(system_call)

#
# a new process exits the kernel with ret_from_fork
#
SYM_CODE_START(ret_from_fork)
	lgr	%r3,%r11
	brasl	%r14,__ret_from_fork
	STACKLEAK_ERASE
	GET_LC	%r13
	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPON
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	stpt	__LC_EXIT_TIMER(%r13)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(ret_from_fork)

/*
 * Program check handler routine
 */

SYM_CODE_START(pgm_check_handler)
	STMG_LC	%r8,%r15,__LC_SAVE_AREA
	GET_LC	%r13
	stpt	__LC_SYS_ENTER_TIMER(%r13)
	BPOFF
	lmg	%r8,%r9,__LC_PGM_OLD_PSW(%r13)
	xgr	%r10,%r10		# %r10 = pt_regs flags, 0 unless guest fault
	tmhh	%r8,0x0001		# coming from user space?
	jo	3f			# -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
	lg	%r11,__LC_CURRENT(%r13)
	tm	__TI_sie(%r11),0xff	# program check while running a guest?
	jz	1f
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT	__SF_SIE_CONTROL(%r15),%r13
	lghi	%r10,_PIF_GUEST_FAULT
#endif
1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	2f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3(%r13),0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
2:	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_invalid or 4f
	CHECK_VMAP_STACK __LC_SAVE_AREA,%r13,4f
3:	lg	%r15,__LC_KERNEL_STACK(%r13)
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stg	%r10,__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK(%r13)
	stmg	%r8,%r9,__PT_PSW(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r12,%r12
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,__do_pgm_check
	tmhh	%r8,0x0001		# returning to user space?
	jno	.Lpgm_exit_kernel
	STACKLEAK_ERASE
	BPON
	stpt	__LC_EXIT_TIMER(%r13)
.Lpgm_exit_kernel:
	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8,%r13),__LC_SVC_NEW_PSW(%r13)
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8(%r13)
	lghi	%r14,1			# %r14 = 1: single-stepped svc
	LBEAR	__LC_PGM_LAST_BREAK(%r13)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
SYM_CODE_END(pgm_check_handler)

/*
 * Interrupt handler macro used for external and IO interrupts.
 */
.macro INT_HANDLER name,lc_old_psw,handler
SYM_CODE_START(\name)
	STMG_LC	%r8,%r15,__LC_SAVE_AREA
	GET_LC	%r13
	stckf	__LC_INT_CLOCK(%r13)
	stpt	__LC_SYS_ENTER_TIMER(%r13)
	STBEAR	__LC_LAST_BREAK(%r13)
	BPOFF
	lmg	%r8,%r9,\lc_old_psw(%r13)
	tmhh	%r8,0x0001			# interrupting from user ?
	jnz	1f
#if IS_ENABLED(CONFIG_KVM)
	lg	%r10,__LC_CURRENT(%r13)
	tm	__TI_sie(%r10),0xff		# interrupted SIE execution?
	jz	0f
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT	__SF_SIE_CONTROL(%r15),%r13
#endif
0:	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	lg	%r15,__LC_KERNEL_STACK(%r13)
2:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	xgr	%r12,%r12
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
	MBEAR	%r11,%r13
	stmg	%r8,%r9,__PT_PSW(%r11)
	lgr	%r2,%r11			# pass pointer to pt_regs
	brasl	%r14,\handler
	mvc	__LC_RETURN_PSW(16,%r13),__PT_PSW(%r11)
	tmhh	%r8,0x0001			# returning to user ?
	jno	2f
	STACKLEAK_ERASE
	BPON
	stpt	__LC_EXIT_TIMER(%r13)
2:	LBEAR	__PT_LAST_BREAK(%r11)
	lmg	%r0,%r15,__PT_R0(%r11)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(\name)
.endm

	.section .irqentry.text, "ax"

INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq

	.section .kprobes.text, "ax"

/*
 * Machine check handler routines
 */
SYM_CODE_START(mcck_int_handler)
	BPOFF
	GET_LC	%r13
	lmg	%r8,%r9,__LC_MCK_OLD_PSW(%r13)
	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	ptlb
	lay	%r14,__LC_CPU_TIMER_SAVE_AREA(%r13)
	mvc	__LC_MCCK_ENTER_TIMER(8,%r13),0(%r14)
	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	# CPU timer invalid: pick the most plausible of the saved
	# timer values and reload the CPU timer from it
	la	%r14,__LC_SYS_ENTER_TIMER(%r13)
	clc	0(8,%r14),__LC_EXIT_TIMER(%r13)
	jl	1f
	la	%r14,__LC_EXIT_TIMER(%r13)
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER(%r13)
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER(%r13)
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8,%r13),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	.Lmcck_user
	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
	lg	%r10,__LC_CURRENT(%r13)
	tm	__TI_sie(%r10),0xff
	jz	.Lmcck_user
	# Need to compare the address instead of __TI_SIE flag.
	# Otherwise there would be a race between setting the flag
	# and entering SIE (or leaving and clearing the flag). This
	# would cause machine checks targeted at the guest to be
	# handled by the host.
	larl	%r14,.Lsie_entry
	clgrjl	%r9,%r14,4f		# old PSW address before sie entry?
	larl	%r14,.Lsie_leave
	clgrjhe	%r9,%r14,4f		# old PSW address at/after sie leave?
	# mcck hit inside the SIE window: flag it for the guest
	lg	%r10,__LC_PCPU(%r13)
	oi	__PCPU_FLAGS+7(%r10),_CIF_MCCK_GUEST
4:	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT	__SF_SIE_CONTROL(%r15),%r13
#endif
.Lmcck_user:
	lg	%r15,__LC_MCCK_STACK(%r13)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lay	%r14,__LC_GPREGS_SAVE_AREA(%r13)
	mvc	__PT_R0(128,%r11),0(%r14)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	xgr	%r12,%r12
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16,%r13),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1(%r13),0x01 # returning to user ?
	jno	0f
	BPON
	stpt	__LC_EXIT_TIMER(%r13)
0:	ALTERNATIVE "brcl 0,0", __stringify(lay %r12,__LC_LAST_BREAK_SAVE_AREA(%r13)),\
		ALT_FACILITY(193)
	LBEAR	0(%r12)
	lmg	%r11,%r15,__PT_R11(%r11)
	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	/*
	 * Iterate over all possible CPU addresses in the range 0..0xffff
	 * and stop each CPU using signal processor. Use compare and swap
	 * to allow just one CPU-stopper and prevent concurrent CPUs from
	 * stopping each other while leaving the others running.
	 */
	lhi	%r5,0
	lhi	%r6,1
	larl	%r7,stop_lock
	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
	jnz	4f
	larl	%r7,this_cpu
	stap	0(%r7)			# this CPU address
	lh	%r4,0(%r7)
	nilh	%r4,0
	lhi	%r0,1
	sll	%r0,16			# CPU counter
	lhi	%r3,0			# next CPU address
0:	cr	%r3,%r4			# skip own CPU address
	je	2f
1:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
	brc	SIGP_CC_BUSY,1b
2:	ahi	%r3,1
	brct	%r0,0b
3:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
	brc	SIGP_CC_BUSY,3b
4:	j	4b
SYM_CODE_END(mcck_int_handler)

SYM_CODE_START(restart_int_handler)
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", ALT_FACILITY(40)
	stg	%r15,__LC_SAVE_AREA_RESTART
	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
	jz	0f
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA
0:	larl	%r15,daton_psw
	lpswe	0(%r15)				# turn dat on, keep irqs off
.Ldaton:
	GET_LC	%r15
	lg	%r15,__LC_RESTART_STACK(%r15)
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	GET_LC	%r13
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART(%r13)
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW(%r13)
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN(%r13)	# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA(%r13)
	lgf	%r3,__LC_RESTART_SOURCE(%r13)
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
SYM_CODE_END(restart_int_handler)

	__INIT
/*
 * Early program check handler: minimal pt_regs setup on the current
 * stack, then call __do_early_pgm_check and return via the saved PSW.
 */
SYM_CODE_START(early_pgm_check_handler)
	STMG_LC	%r8,%r15,__LC_SAVE_AREA
	GET_LC	%r13
	aghi	%r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_PSW(16,%r11),__LC_PGM_OLD_PSW(%r13)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
	lgr	%r2,%r11			# pass pointer to pt_regs
	brasl	%r14,__do_early_pgm_check
	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(early_pgm_check_handler)
	__FINIT

	.section .kprobes.text, "ax"

/*
 * The synchronous or the asynchronous stack pointer is invalid. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
SYM_CODE_START(stack_invalid)
	GET_LC	%r15
	lg	%r15,__LC_NODAT_STACK(%r15)	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)	# %r14 points at the save area
	GET_LC	%r2
	mvc	__PT_ORIG_GPR2(8,%r11),__LC_PGM_LAST_BREAK(%r2)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_invalid
SYM_CODE_END(stack_invalid)

	.section .data, "aw"
	.balign	4
SYM_DATA_LOCAL(stop_lock, .long 0)	# CAS lock: first mcck CPU-stopper wins
SYM_DATA_LOCAL(this_cpu, .short 0)	# CPU address stored by stap
	.balign	8
SYM_DATA_START_LOCAL(daton_psw)		# PSW used to switch DAT on in restart
	.quad	PSW_KERNEL_BITS
	.quad	.Ldaton
SYM_DATA_END(daton_psw)