/*
 *  arch/s390/kernel/entry.S
 *    S390 low-level entry points.
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Hartmut Penner (hp@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <linux/config.h>
#include <asm/cache.h>
#include <asm/lowcore.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>

/*
 * Stack layout for the system_call stack entry.
 * The first few entries are identical to the user_regs_struct.
 */
SP_PTREGS    = STACK_FRAME_OVERHEAD
SP_ARGS      = STACK_FRAME_OVERHEAD + __PT_ARGS
SP_PSW       = STACK_FRAME_OVERHEAD + __PT_PSW
SP_R0        = STACK_FRAME_OVERHEAD + __PT_GPRS
SP_R1        = STACK_FRAME_OVERHEAD + __PT_GPRS + 4
SP_R2        = STACK_FRAME_OVERHEAD + __PT_GPRS + 8
SP_R3        = STACK_FRAME_OVERHEAD + __PT_GPRS + 12
SP_R4        = STACK_FRAME_OVERHEAD + __PT_GPRS + 16
SP_R5        = STACK_FRAME_OVERHEAD + __PT_GPRS + 20
SP_R6        = STACK_FRAME_OVERHEAD + __PT_GPRS + 24
SP_R7        = STACK_FRAME_OVERHEAD + __PT_GPRS + 28
SP_R8        = STACK_FRAME_OVERHEAD + __PT_GPRS + 32
SP_R9        = STACK_FRAME_OVERHEAD + __PT_GPRS + 36
SP_R10       = STACK_FRAME_OVERHEAD + __PT_GPRS + 40
SP_R11       = STACK_FRAME_OVERHEAD + __PT_GPRS + 44
SP_R12       = STACK_FRAME_OVERHEAD + __PT_GPRS + 48
SP_R13       = STACK_FRAME_OVERHEAD + __PT_GPRS + 52
SP_R14       = STACK_FRAME_OVERHEAD + __PT_GPRS + 56
SP_R15       = STACK_FRAME_OVERHEAD + __PT_GPRS + 60
SP_ORIG_R2   = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
SP_ILC       = STACK_FRAME_OVERHEAD + __PT_ILC
SP_TRAP      = STACK_FRAME_OVERHEAD + __PT_TRAP
SP_SIZE      = STACK_FRAME_OVERHEAD + __PT_SIZE

_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
		 _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED)

STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE  = 1 << STACK_SHIFT

#define BASED(name) name-system_call(%r13)

/*
 * Register usage in interrupt handlers:
 *    R9  - pointer to current task structure
 *    R13 - pointer to literal pool
 *    R14 - return register for function calls
 *    R15 - kernel stack pointer
 */

	.macro	STORE_TIMER lc_offset
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	stpt	\lc_offset
#endif
	.endm

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	.macro	UPDATE_VTIME lc_from,lc_to,lc_sum
	lm	%r10,%r11,\lc_from
	sl	%r10,\lc_to
	sl	%r11,\lc_to+4
	bc	3,BASED(0f)
	sl	%r10,BASED(.Lc_1)
0:	al	%r10,\lc_sum
	al	%r11,\lc_sum+4
	bc	12,BASED(1f)
	al	%r10,BASED(.Lc_1)
1:	stm	%r10,%r11,\lc_sum
	.endm
#endif

	.macro	SAVE_ALL_BASE savearea
	stm	%r12,%r15,\savearea
	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
	.endm

	.macro	SAVE_ALL psworg,savearea,sync
	la	%r12,\psworg
	.if	\sync
	tm	\psworg+1,0x01		# test problem state bit
	bz	BASED(2f)		# skip stack setup save
	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
	.else
	tm	\psworg+1,0x01		# test problem state bit
	bnz	BASED(1f)		# from user -> load async stack
	clc	\psworg+4(4),BASED(.Lcritical_end)
	bhe	BASED(0f)
	clc	\psworg+4(4),BASED(.Lcritical_start)
	bl	BASED(0f)
	l	%r14,BASED(.Lcleanup_critical)
	basr	%r14,%r14
	tm	0(%r12),0x01		# retest problem state after cleanup
	bnz	BASED(1f)
0:	l	%r14,__LC_ASYNC_STACK	# are we already on the async stack ?
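	# Note: the next three instructions implement the "already on the
	# async stack" test. __LC_ASYNC_STACK holds the top of the async
	# stack, so (async_stack_top - %r15) >> STACK_SHIFT is zero
	# exactly when %r15 lies within the async stack.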
	slr	%r14,%r15
	sra	%r14,STACK_SHIFT
	be	BASED(2f)
1:	l	%r15,__LC_ASYNC_STACK
	.endif
#ifdef CONFIG_CHECK_STACK
	b	BASED(3f)
2:	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	bz	BASED(stack_overflow)
3:
#endif
2:	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
	la	%r12,\psworg
	st	%r2,SP_ORIG_R2(%r15)	# store original content of gpr 2
	icm	%r12,12,__LC_SVC_ILC
	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
	st	%r12,SP_ILC(%r15)
	mvc	SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
	la	%r12,0
	st	%r12,__SF_BACKCHAIN(%r15) # clear back chain
	.endm

	.macro	RESTORE_ALL sync
	mvc	__LC_RETURN_PSW(8),SP_PSW(%r15) # move user PSW to lowcore
	.if	!\sync
	ni	__LC_RETURN_PSW+1,0xfd	# clear wait state bit
	.endif
	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15 of user
	STORE_TIMER __LC_EXIT_TIMER
	lpsw	__LC_RETURN_PSW		# back to caller
	.endm

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
	.globl	__switch_to
__switch_to:
	basr	%r1,0
__switch_to_base:
	tm	__THREAD_per(%r3),0xe8	# new process is using per ?
	bz	__switch_to_noper-__switch_to_base(%r1)	# if not we're fine
	stctl	%c9,%c11,__SF_EMPTY(%r15)	# We are using per stuff
	clc	__THREAD_per(12,%r3),__SF_EMPTY(%r15)
	be	__switch_to_noper-__switch_to_base(%r1)	# we got away w/o bashing TLB's
	lctl	%c9,%c11,__THREAD_per(%r3)	# Nope we didn't
__switch_to_noper:
	stm	%r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
	st	%r15,__THREAD_ksp(%r2)	# store kernel stack to prev->tss.ksp
	l	%r15,__THREAD_ksp(%r3)	# load kernel stack from next->tss.ksp
	lm	%r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
	st	%r3,__LC_CURRENT	# __LC_CURRENT = current task struct
	lctl	%c4,%c4,__TASK_pid(%r3)	# load pid to control reg. 4
	l	%r3,__THREAD_info(%r3)	# load thread_info from task struct
	st	%r3,__LC_THREAD_INFO
	ahi	%r3,STACK_SIZE
	st	%r3,__LC_KERNEL_STACK	# __LC_KERNEL_STACK = new kernel stack
	br	%r14

__critical_start:
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

	.globl	system_call
system_call:
	STORE_TIMER __LC_SYNC_ENTER_TIMER
sysc_saveall:
	SAVE_ALL_BASE __LC_SAVE_AREA
	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
	lh	%r7,0x8a		# get svc number from lowcore
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
sysc_vtime:
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(sysc_do_svc)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
sysc_stime:
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
sysc_update:
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
#endif
sysc_do_svc:
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	sla	%r7,2			# *4 and test for svc 0
	bnz	BASED(sysc_nr_ok)	# svc number > 0
	# svc 0: system call number in %r1
	cl	%r1,BASED(.Lnr_syscalls)
	bnl	BASED(sysc_nr_ok)
	lr	%r7,%r1			# copy svc number to %r7
	sla	%r7,2			# *4
sysc_nr_ok:
	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
sysc_do_restart:
	tm	__TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
	l	%r8,sys_call_table-system_call(%r7,%r13) # get system call addr.
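	# l does not change the condition code, so the bnz below still
	# tests the _TIF_SYSCALL_TRACE/_TIF_SYSCALL_AUDIT bits set by the
	# tm above.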
	bnz	BASED(sysc_tracesys)
	basr	%r14,%r8		# call sys_xxxx
	st	%r2,SP_R2(%r15)		# store return value (change R2 on stack)
					# ATTENTION: check sys_execve_glue before
					# changing anything here !!

sysc_return:
	tm	SP_PSW+1(%r15),0x01	# returning to user ?
	bno	BASED(sysc_leave)
	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
	bnz	BASED(sysc_work)	# there is work to do (signals etc.)
sysc_leave:
	RESTORE_ALL 1

#
# recheck if there is more work to do
#
sysc_work_loop:
	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
	bz	BASED(sysc_leave)	# there is no work to do
#
# One of the work bits is on. Find out which one.
#
sysc_work:
	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
	bo	BASED(sysc_reschedule)
	tm	__TI_flags+3(%r9),_TIF_SIGPENDING
	bo	BASED(sysc_sigpending)
	tm	__TI_flags+3(%r9),_TIF_RESTART_SVC
	bo	BASED(sysc_restart)
	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
	bo	BASED(sysc_singlestep)
	b	BASED(sysc_leave)

#
# _TIF_NEED_RESCHED is set, call schedule
#
sysc_reschedule:
	l	%r1,BASED(.Lschedule)
	la	%r14,BASED(sysc_work_loop)
	br	%r1			# call scheduler

#
# _TIF_SIGPENDING is set, call do_signal
#
sysc_sigpending:
	ni	__TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	sr	%r3,%r3			# clear *oldset
	l	%r1,BASED(.Ldo_signal)
	basr	%r14,%r1		# call do_signal
	tm	__TI_flags+3(%r9),_TIF_RESTART_SVC
	bo	BASED(sysc_restart)
	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
	bo	BASED(sysc_singlestep)
	b	BASED(sysc_leave)	# out of here, do NOT recheck

#
# _TIF_RESTART_SVC is set, set up registers and restart svc
#
sysc_restart:
	ni	__TI_flags+3(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
	l	%r7,SP_R2(%r15)		# load new svc number
	sla	%r7,2
	mvc	SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
	lm	%r2,%r6,SP_R2(%r15)	# load svc arguments
	b	BASED(sysc_do_restart)	# restart svc

#
# _TIF_SINGLE_STEP is set, call do_single_step
#
sysc_singlestep:
	ni	__TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
	mvi	SP_TRAP+1(%r15),0x28	# set trap indication to pgm check
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
	la	%r14,BASED(sysc_return)	# load adr. of system return
	br	%r1			# branch to do_single_step

__critical_end:

#
# call trace before and after sys_call
#
sysc_tracesys:
	l	%r1,BASED(.Ltrace)
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	la	%r3,0
	srl	%r7,2
	st	%r7,SP_R2(%r15)
	basr	%r14,%r1
	clc	SP_R2(4,%r15),BASED(.Lnr_syscalls)
	bnl	BASED(sysc_tracenogo)
	l	%r7,SP_R2(%r15)		# strace might have changed the
	sll	%r7,2			#  system call
	l	%r8,sys_call_table-system_call(%r7,%r13)
sysc_tracego:
	lm	%r3,%r6,SP_R3(%r15)
	l	%r2,SP_ORIG_R2(%r15)
	basr	%r14,%r8		# call sys_xxx
	st	%r2,SP_R2(%r15)		# store return value
sysc_tracenogo:
	tm	__TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
	bz	BASED(sysc_return)
	l	%r1,BASED(.Ltrace)
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	la	%r3,1
	la	%r14,BASED(sysc_return)
	br	%r1

#
# a new process exits the kernel with ret_from_fork
#
	.globl	ret_from_fork
ret_from_fork:
	l	%r13,__LC_SVC_NEW_PSW+4
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	tm	SP_PSW+1(%r15),0x01	# forking a kernel thread ?
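	# kernel threads run with the problem state bit off; only in that
	# case does the store below put the stack pointer into the child's
	# pt_regs, user processes skip it.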
	bo	BASED(0f)
	st	%r15,SP_R15(%r15)	# store stack pointer for new kthread
0:	l	%r1,BASED(.Lschedtail)
	basr	%r14,%r1
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	b	BASED(sysc_return)

#
# clone, fork, vfork, exec and sigreturn need glue,
# because they all expect pt_regs as parameter,
# but are called with different parameter.
# return-address is set up above
#
sys_clone_glue:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	l	%r1,BASED(.Lclone)
	br	%r1			# branch to sys_clone

sys_fork_glue:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	l	%r1,BASED(.Lfork)
	br	%r1			# branch to sys_fork

sys_vfork_glue:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	l	%r1,BASED(.Lvfork)
	br	%r1			# branch to sys_vfork

sys_execve_glue:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	l	%r1,BASED(.Lexecve)
	lr	%r12,%r14		# save return address
	basr	%r14,%r1		# call sys_execve
	ltr	%r2,%r2			# check if execve failed
	bnz	0(%r12)			# it did fail -> store result in gpr2
	b	4(%r12)			# SKIP ST 2,SP_R2(15) after BASR 14,8
					# in system_call/sysc_tracesys

sys_sigreturn_glue:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs as parameter
	l	%r1,BASED(.Lsigreturn)
	br	%r1			# branch to sys_sigreturn

sys_rt_sigreturn_glue:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs as parameter
	l	%r1,BASED(.Lrt_sigreturn)
	br	%r1			# branch to sys_rt_sigreturn

#
# sigsuspend and rt_sigsuspend need pt_regs as an additional
# parameter and they have to skip the store of %r2 into the
# user register %r2 because the return value was set in
# sigsuspend and rt_sigsuspend already and must not be overwritten!
#

sys_sigsuspend_glue:
	lr	%r5,%r4			# move mask back
	lr	%r4,%r3			# move history1 parameter
	lr	%r3,%r2			# move history0 parameter
	la	%r2,SP_PTREGS(%r15)	# load pt_regs as first parameter
	l	%r1,BASED(.Lsigsuspend)
	la	%r14,4(%r14)		# skip store of return value
	br	%r1			# branch to sys_sigsuspend

sys_rt_sigsuspend_glue:
	lr	%r4,%r3			# move sigsetsize parameter
	lr	%r3,%r2			# move unewset parameter
	la	%r2,SP_PTREGS(%r15)	# load pt_regs as first parameter
	l	%r1,BASED(.Lrt_sigsuspend)
	la	%r14,4(%r14)		# skip store of return value
	br	%r1			# branch to sys_rt_sigsuspend

sys_sigaltstack_glue:
	la	%r4,SP_PTREGS(%r15)	# load pt_regs as parameter
	l	%r1,BASED(.Lsigaltstack)
	br	%r1			# branch to sys_sigaltstack


/*
 * Program check handler routine
 */

	.globl	pgm_check_handler
pgm_check_handler:
/*
 * First we need to check for a special case:
 * Single stepping an instruction that disables the PER event mask will
 * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
 * For a single stepped SVC the program check handler gets control after
 * the SVC new PSW has been loaded. But we want to execute the SVC first and
 * then handle the PER event. Therefore we update the SVC old PSW to point
 * to the pgm_check_handler and branch to the SVC handler after we checked
 * if we have to load the kernel stack register.
 * For every other possible cause for PER event without the PER mask set
 * we just ignore the PER event (FIXME: is there anything we have to do
 * for LPSW?).
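 *
 * Roughly, as C-like pseudocode (a sketch of the checks below, not of
 * any real function):
 *
 *	if (!(pgm_int_code & 0x0080))		PER bit not set
 *		dispatch via pgm_check_table;	pgm_do_call
 *	else if (PER recording on in old PSW)
 *		record PER info, dispatch;	pgm_per_std
 *	else if (pgm old PSW == SVC new PSW)
 *		execute the SVC first;		pgm_svcper
 *	else
 *		ignore the PER event;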
 */
	STORE_TIMER __LC_SYNC_ENTER_TIMER
	SAVE_ALL_BASE __LC_SAVE_AREA
	tm	__LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
	bnz	BASED(pgm_per)		# got per exception -> special case
	SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(pgm_no_vtime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime:
#endif
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	l	%r3,__LC_PGM_ILC	# load program interruption code
	la	%r8,0x7f
	nr	%r8,%r3
pgm_do_call:
	l	%r7,BASED(.Ljump_table)
	sll	%r8,2
	l	%r7,0(%r8,%r7)		# load address of handler routine
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	la	%r14,BASED(sysc_return)
	br	%r7			# branch to interrupt-handler

#
# handle per exception
#
pgm_per:
	tm	__LC_PGM_OLD_PSW,0x40	# test if per event recording is on
	bnz	BASED(pgm_per_std)	# ok, normal per event from user space
# ok its one of the special cases, now we need to find out which one
	clc	__LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
	be	BASED(pgm_svcper)
# no interesting special case, ignore PER event
	lm	%r12,%r15,__LC_SAVE_AREA
	lpsw	0x28

#
# Normal per exception
#
pgm_per_std:
	SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(pgm_no_vtime2)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime2:
#endif
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	l	%r1,__TI_task(%r9)
	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
	l	%r3,__LC_PGM_ILC	# load program interruption code
	la	%r8,0x7f
	nr	%r8,%r3			# clear per-event-bit and ilc
	be	BASED(sysc_return)	# only per or per+check ?
	b	BASED(pgm_do_call)

#
# it was a single stepped SVC that is causing all the trouble
#
pgm_svcper:
	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
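	# same vtime pattern as sysc_vtime: no CPU time bookkeeping when
	# the single-stepped SVC was issued from kernel mode.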
	bz	BASED(pgm_no_vtime3)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime3:
#endif
	lh	%r7,0x8a		# get svc number from lowcore
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	l	%r1,__TI_task(%r9)
	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	b	BASED(sysc_do_svc)

/*
 * IO interrupt handler routine
 */

	.globl io_int_handler
io_int_handler:
	STORE_TIMER __LC_ASYNC_ENTER_TIMER
	stck	__LC_INT_CLOCK
	SAVE_ALL_BASE __LC_SAVE_AREA+16
	SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+16,0
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(io_no_vtime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
io_no_vtime:
#endif
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	l	%r1,BASED(.Ldo_IRQ)	# load address of do_IRQ
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	basr	%r14,%r1		# branch to standard irq handler

io_return:
	tm	SP_PSW+1(%r15),0x01	# returning to user ?
#ifdef CONFIG_PREEMPT
	bno	BASED(io_preempt)	# no -> check for preemptive scheduling
#else
	bno	BASED(io_leave)		# no -> skip resched & signal
#endif
	tm	__TI_flags+3(%r9),_TIF_WORK_INT
	bnz	BASED(io_work)		# there is work to do (signals etc.)
io_leave:
	RESTORE_ALL 0

#ifdef CONFIG_PREEMPT
io_preempt:
	icm	%r0,15,__TI_precount(%r9)
	bnz	BASED(io_leave)
	l	%r1,SP_R15(%r15)
	s	%r1,BASED(.Lc_spsize)
	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
	lr	%r15,%r1
io_resume_loop:
	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
	bno	BASED(io_leave)
	mvc	__TI_precount(4,%r9),BASED(.Lc_pactive)
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	l	%r1,BASED(.Lschedule)
	basr	%r14,%r1		# call schedule
	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
	xc	__TI_precount(4,%r9),__TI_precount(%r9)
	b	BASED(io_resume_loop)
#endif

#
# switch to kernel stack, then check the TIF bits
#
io_work:
	l	%r1,__LC_KERNEL_STACK
	s	%r1,BASED(.Lc_spsize)
	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
	lr	%r15,%r1
#
# One of the work bits is on. Find out which one.
# Checked are: _TIF_SIGPENDING and _TIF_NEED_RESCHED
#
io_work_loop:
	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
	bo	BASED(io_reschedule)
	tm	__TI_flags+3(%r9),_TIF_SIGPENDING
	bo	BASED(io_sigpending)
	b	BASED(io_leave)

#
# _TIF_NEED_RESCHED is set, call schedule
#
io_reschedule:
	l	%r1,BASED(.Lschedule)
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	basr	%r14,%r1		# call scheduler
	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
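	# the stosm/stnsm pairs toggle PSW system mask bits 6 and 7:
	# OR with 0x03 enables I/O and external interrupts, AND with 0xfc
	# disables them again around the call to schedule.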
	tm	__TI_flags+3(%r9),_TIF_WORK_INT
	bz	BASED(io_leave)		# there is no work to do
	b	BASED(io_work_loop)

#
# _TIF_SIGPENDING is set, call do_signal
#
io_sigpending:
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	sr	%r3,%r3			# clear *oldset
	l	%r1,BASED(.Ldo_signal)
	basr	%r14,%r1		# call do_signal
	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
	b	BASED(io_leave)		# out of here, do NOT recheck

/*
 * External interrupt handler routine
 */

	.globl	ext_int_handler
ext_int_handler:
	STORE_TIMER __LC_ASYNC_ENTER_TIMER
	stck	__LC_INT_CLOCK
	SAVE_ALL_BASE __LC_SAVE_AREA+16
	SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16,0
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(ext_no_vtime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
ext_no_vtime:
#endif
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	lh	%r3,__LC_EXT_INT_CODE	# get interruption code
	l	%r1,BASED(.Ldo_extint)
	basr	%r14,%r1
	b	BASED(io_return)

/*
 * Machine check handler routines
 */

	.globl mcck_int_handler
mcck_int_handler:
	STORE_TIMER __LC_ASYNC_ENTER_TIMER
	SAVE_ALL_BASE __LC_SAVE_AREA+32
	SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32,0
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(mcck_no_vtime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
mcck_no_vtime:
#endif
	l	%r1,BASED(.Ls390_mcck)
	basr	%r14,%r1		# call machine check handler
mcck_return:
	RESTORE_ALL 0

#ifdef CONFIG_SMP
/*
 * Restart interruption handler, kick starter for additional CPUs
 */
	.globl restart_int_handler
restart_int_handler:
	l	%r15,__LC_SAVE_AREA+60	# load ksp
	lctl	%c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
	lam	%a0,%a15,__LC_AREGS_SAVE_AREA
	lm	%r6,%r15,__SF_GPRS(%r15) # load registers from clone
	stosm	__SF_EMPTY(%r15),0x04	# now we can turn dat on
	basr	%r14,0
	l	%r14,restart_addr-.(%r14)
	br	%r14			# branch to start_secondary
restart_addr:
	.long	start_secondary
#else
/*
 * If we do not run with SMP enabled, let the new CPU crash ...
 */
	.globl restart_int_handler
restart_int_handler:
	basr	%r1,0
restart_base:
	lpsw	restart_crash-restart_base(%r1)
	.align	8
restart_crash:
	.long	0x000a0000,0x00000000
restart_go:
#endif

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
	l	%r15,__LC_PANIC_STACK	# change to panic stack
	sl	%r15,BASED(.Lc_spsize)
	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
	la	%r1,__LC_SAVE_AREA
	ch	%r12,BASED(.L0x020)	# old psw addr == __LC_SVC_OLD_PSW ?
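	# %r12 still holds the lowcore address of the old PSW set up by
	# SAVE_ALL; 0x20/0x28 are the SVC/program old PSW locations, i.e.
	# the synchronous events that saved %r12-%r15 in __LC_SAVE_AREA.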
	be	BASED(0f)
	ch	%r12,BASED(.L0x028)	# old psw addr == __LC_PGM_OLD_PSW ?
	be	BASED(0f)
	la	%r1,__LC_SAVE_AREA+16
0:	mvc	SP_R12(16,%r15),0(%r1)	# move %r12-%r15 to stack
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
	l	%r1,BASED(1f)		# branch to kernel_stack_overflow
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	br	%r1
1:	.long	kernel_stack_overflow
#endif

cleanup_table_system_call:
	.long	system_call + 0x80000000, sysc_do_svc + 0x80000000
cleanup_table_sysc_return:
	.long	sysc_return + 0x80000000, sysc_leave + 0x80000000
cleanup_table_sysc_leave:
	.long	sysc_leave + 0x80000000, sysc_work_loop + 0x80000000
cleanup_table_sysc_work_loop:
	.long	sysc_work_loop + 0x80000000, sysc_reschedule + 0x80000000

cleanup_critical:
	clc	4(4,%r12),BASED(cleanup_table_system_call)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_system_call+4)
	bl	BASED(cleanup_system_call)
0:
	clc	4(4,%r12),BASED(cleanup_table_sysc_return)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_sysc_return+4)
	bl	BASED(cleanup_sysc_return)
0:
	clc	4(4,%r12),BASED(cleanup_table_sysc_leave)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_sysc_leave+4)
	bl	BASED(cleanup_sysc_leave)
0:
	clc	4(4,%r12),BASED(cleanup_table_sysc_work_loop)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
	bl	BASED(cleanup_sysc_leave)
0:
	br	%r14

cleanup_system_call:
	mvc	__LC_RETURN_PSW(8),0(%r12)
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
	bh	BASED(0f)
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
0:	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
	bhe	BASED(cleanup_vtime)
#endif
	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
	bh	BASED(0f)
	mvc	__LC_SAVE_AREA(16),__LC_SAVE_AREA+16
0:	st	%r13,__LC_SAVE_AREA+20
	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
	st	%r15,__LC_SAVE_AREA+28
	lh	%r7,0x8a
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
cleanup_vtime:
	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
	bhe	BASED(cleanup_stime)
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
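	# replay of sysc_vtime: depending on how far the interrupted entry
	# code got, redo only the timer updates that are still missing.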
	bz	BASED(cleanup_novtime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
cleanup_stime:
	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16)
	bh	BASED(cleanup_update)
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
cleanup_update:
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
cleanup_novtime:
#endif
	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
	la	%r12,__LC_RETURN_PSW
	br	%r14
cleanup_system_call_insn:
	.long	sysc_saveall + 0x80000000
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	.long	system_call + 0x80000000
	.long	sysc_vtime + 0x80000000
	.long	sysc_stime + 0x80000000
	.long	sysc_update + 0x80000000
#endif

cleanup_sysc_return:
	mvc	__LC_RETURN_PSW(4),0(%r12)
	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_return)
	la	%r12,__LC_RETURN_PSW
	br	%r14

cleanup_sysc_leave:
	clc	4(4,%r12),BASED(cleanup_sysc_leave_insn)
	be	BASED(0f)
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
	clc	4(4,%r12),BASED(cleanup_sysc_leave_insn+4)
	be	BASED(0f)
#endif
	mvc	__LC_RETURN_PSW(8),SP_PSW(%r15)
	mvc	__LC_SAVE_AREA+16(16),SP_R12(%r15)
	lm	%r0,%r11,SP_R0(%r15)
	l	%r15,SP_R15(%r15)
0:	la	%r12,__LC_RETURN_PSW
	br	%r14
cleanup_sysc_leave_insn:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	.long	sysc_leave + 14 + 0x80000000
#endif
	.long	sysc_leave + 10 + 0x80000000

/*
 * Integer constants
 */
		.align	4
.Lc_spsize:	.long	SP_SIZE
.Lc_overhead:	.long	STACK_FRAME_OVERHEAD
.Lc_pactive:	.long	PREEMPT_ACTIVE
.Lnr_syscalls:	.long	NR_syscalls
.L0x018:	.short	0x018
.L0x020:	.short	0x020
.L0x028:	.short	0x028
.L0x030:	.short	0x030
.L0x038:	.short	0x038
.Lc_1:		.long	1

/*
 * Symbol constants
 */
.Ls390_mcck:	.long	s390_do_machine_check
.Ldo_IRQ:	.long	do_IRQ
.Ldo_extint:	.long	do_extint
.Ldo_signal:	.long	do_signal
.Lhandle_per:	.long	do_single_step
.Ljump_table:	.long	pgm_check_table
.Lschedule:	.long	schedule
.Lclone:	.long	sys_clone
.Lexecve:	.long	sys_execve
.Lfork:		.long	sys_fork
.Lrt_sigreturn:	.long	sys_rt_sigreturn
.Lrt_sigsuspend:
		.long	sys_rt_sigsuspend
.Lsigreturn:	.long	sys_sigreturn
.Lsigsuspend:	.long	sys_sigsuspend
.Lsigaltstack:	.long	sys_sigaltstack
.Ltrace:	.long	syscall_trace
.Lvfork:	.long	sys_vfork
.Lschedtail:	.long	schedule_tail

.Lcritical_start:
		.long	__critical_start + 0x80000000
.Lcritical_end:
		.long	__critical_end + 0x80000000
.Lcleanup_critical:
		.long	cleanup_critical

#define SYSCALL(esa,esame,emu)	.long esa
	.globl	sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL
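# Each entry in syscalls.S is a SYSCALL(esa,esame,emu) triplet; defining
# SYSCALL to expand to the first operand builds the 31-bit table as an
# array of 4-byte handler addresses, indexed by syscall number * 4
# (matching the "sla %r7,2" in sysc_do_svc).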