/*
 * S390 low-level entry points.
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Hartmut Penner (hp@de.ibm.com),
 *	      Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>

__PT_R0	 =	__PT_GPRS
__PT_R1	 =	__PT_GPRS + 4
__PT_R2	 =	__PT_GPRS + 8
__PT_R3	 =	__PT_GPRS + 12
__PT_R4	 =	__PT_GPRS + 16
__PT_R5	 =	__PT_GPRS + 20
__PT_R6	 =	__PT_GPRS + 24
__PT_R7	 =	__PT_GPRS + 28
__PT_R8	 =	__PT_GPRS + 32
__PT_R9	 =	__PT_GPRS + 36
__PT_R10 =	__PT_GPRS + 40
__PT_R11 =	__PT_GPRS + 44
__PT_R12 =	__PT_GPRS + 48
__PT_R13 =	__PT_GPRS + 52
__PT_R14 =	__PT_GPRS + 56
__PT_R15 =	__PT_GPRS + 60

STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT  = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

_TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
_TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		   _TIF_SYSCALL_TRACEPOINT)
_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE)
_PIF_WORK	= (_PIF_PER_TRAP)

#define BASED(name) name-system_call(%r13)

	.macro	TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	l	%r1,BASED(.Lc_hardirqs_on)
	basr	%r14,%r1		# call trace_hardirqs_on_caller
#endif
	.endm

	.macro	TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	l	%r1,BASED(.Lc_hardirqs_off)
	basr	%r14,%r1		# call trace_hardirqs_off_caller
#endif
	.endm

	.macro	LOCKDEP_SYS_EXIT
#ifdef CONFIG_LOCKDEP
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jz	.+10
	l	%r1,BASED(.Lc_lockdep_sys_exit)
	basr	%r14,%r1		# call lockdep_sys_exit
#endif
	.endm

	.macro	CHECK_STACK stacksize,savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,\stacksize - CONFIG_STACK_GUARD
	la	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

	.macro	SWITCH_ASYNC savearea,stack,shift
	tmh	%r8,0x0001		# interrupting from user ?
	jnz	1f
	lr	%r14,%r9
	sl	%r14,BASED(.Lc_critical_start)
	cl	%r14,BASED(.Lc_critical_length)
	jhe	0f
	la	%r11,\savearea		# inside critical section, do cleanup
	bras	%r14,cleanup_critical
	tmh	%r8,0x0001		# retest problem state after cleanup
	jnz	1f
0:	l	%r14,\stack		# are we already on the target stack?
	slr	%r14,%r15
	sra	%r14,\shift
	jnz	1f
	CHECK_STACK 1<<\shift,\savearea
	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	l	%r15,\stack		# load target stack
2:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	.endm
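
#
# The macros below do 64-bit arithmetic on a cpu-timer value held in a
# register pair: ADD64/SUB64 add or subtract the high and low words
# separately and use the condition code (brc) to propagate the carry or
# borrow into the high word. UPDATE_VTIME uses them to add the time spent
# in user space (between the last exit and this entry) to the user timer,
# the kernel time between the last update and that exit to the system
# timer, and then refreshes __LC_LAST_UPDATE_TIMER.
#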
	.macro	ADD64 high,low,timer
	al	\high,\timer
	al	\low,4+\timer
	brc	12,.+8
	ahi	\high,1
	.endm

	.macro	SUB64 high,low,timer
	sl	\high,\timer
	sl	\low,4+\timer
	brc	3,.+8
	ahi	\high,-1
	.endm

	.macro	UPDATE_VTIME high,low,enter_timer
	lm	\high,\low,__LC_EXIT_TIMER
	SUB64	\high,\low,\enter_timer
	ADD64	\high,\low,__LC_USER_TIMER
	stm	\high,\low,__LC_USER_TIMER
	lm	\high,\low,__LC_LAST_UPDATE_TIMER
	SUB64	\high,\low,__LC_EXIT_TIMER
	ADD64	\high,\low,__LC_SYSTEM_TIMER
	stm	\high,\low,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
	.endm

	.macro	REENABLE_IRQS
	st	%r8,__LC_RETURN_PSW
	ni	__LC_RETURN_PSW,0xbf
	ssm	__LC_RETURN_PSW
	.endm

	.section .kprobes.text, "ax"

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stm	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	st	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
	l	%r4,__THREAD_info(%r2)		# get thread_info of prev
	l	%r5,__THREAD_info(%r3)		# get thread_info of next
	lr	%r15,%r5
	ahi	%r15,STACK_INIT			# end of kernel stack of next
	st	%r3,__LC_CURRENT		# store task struct of next
	st	%r5,__LC_THREAD_INFO		# store thread info of next
	st	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3)	# store pid of next
	l	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
	lm	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	br	%r14

.L__critical_start:
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

ENTRY(system_call)
	stpt	__LC_SYNC_ENTER_TIMER
.Lsysc_stm:
	stm	%r8,%r15,__LC_SAVE_AREA_SYNC
	l	%r12,__LC_THREAD_INFO
	l	%r13,__LC_SVC_NEW_PSW+4
	lhi	%r14,_PIF_SYSCALL
.Lsysc_per:
	l	%r15,__LC_KERNEL_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
.Lsysc_vtime:
	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
	stm	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_PSW(8,%r11),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
	st	%r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
	l	%r10,__TI_sysc_table(%r12)	# 31 bit system call table
	lh	%r8,__PT_INT_CODE+2(%r11)
	sla	%r8,2				# shift and test for svc0
	jnz	.Lsysc_nr_ok
	# svc 0: system call number in %r1
	cl	%r1,BASED(.Lnr_syscalls)
	jnl	.Lsysc_nr_ok
	sth	%r1,__PT_INT_CODE+2(%r11)
	lr	%r8,%r1
	sla	%r8,2
.Lsysc_nr_ok:
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	st	%r2,__PT_ORIG_GPR2(%r11)
	st	%r7,STACK_FRAME_OVERHEAD(%r15)
	l	%r9,0(%r8,%r10)			# get system call addr.
	tm	__TI_flags+3(%r12),_TIF_TRACE
	jnz	.Lsysc_tracesys
	basr	%r14,%r9			# call sys_xxxx
	st	%r2,__PT_R2(%r11)		# store return value

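#
# Common system call exit. The work handlers below return here by loading
# %r14 with BASED(.Lsysc_return) before branching; BASED() addresses the
# literal pool relative to %r13, which holds the address of system_call
# (taken from __LC_SVC_NEW_PSW+4 on entry).
#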
.Lsysc_return:
	LOCKDEP_SYS_EXIT
.Lsysc_tif:
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lsysc_restore
	tm	__PT_FLAGS+3(%r11),_PIF_WORK
	jnz	.Lsysc_work
	tm	__TI_flags+3(%r12),_TIF_WORK
	jnz	.Lsysc_work		# check for thread work
	tm	__LC_CPU_FLAGS+3,_CIF_WORK
	jnz	.Lsysc_work
.Lsysc_restore:
	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
	stpt	__LC_EXIT_TIMER
	lm	%r0,%r15,__PT_R0(%r11)
	lpsw	__LC_RETURN_PSW
.Lsysc_done:

#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
	tm	__LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
	jo	.Lsysc_mcck_pending
	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
	jo	.Lsysc_reschedule
	tm	__PT_FLAGS+3(%r11),_PIF_PER_TRAP
	jo	.Lsysc_singlestep
	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
	jo	.Lsysc_sigpending
	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
	jo	.Lsysc_notify_resume
	tm	__LC_CPU_FLAGS+3,_CIF_ASCE
	jo	.Lsysc_uaccess
	j	.Lsysc_return		# beware of critical section cleanup

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lsysc_reschedule:
	l	%r1,BASED(.Lc_schedule)
	la	%r14,BASED(.Lsysc_return)
	br	%r1			# call schedule

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lsysc_mcck_pending:
	l	%r1,BASED(.Lc_handle_mcck)
	la	%r14,BASED(.Lsysc_return)
	br	%r1			# TIF bit will be cleared by handler

#
# _CIF_ASCE is set, load user space asce
#
.Lsysc_uaccess:
	ni	__LC_CPU_FLAGS+3,255-_CIF_ASCE
	lctl	%c1,%c1,__LC_USER_ASCE	# load primary asce
	j	.Lsysc_return

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lsysc_sigpending:
	lr	%r2,%r11		# pass pointer to pt_regs
	l	%r1,BASED(.Lc_do_signal)
	basr	%r14,%r1		# call do_signal
	tm	__PT_FLAGS+3(%r11),_PIF_SYSCALL
	jno	.Lsysc_return
	lm	%r2,%r7,__PT_R2(%r11)	# load svc arguments
	l	%r10,__TI_sysc_table(%r12)	# 31 bit system call table
	xr	%r8,%r8			# svc 0 returns -ENOSYS
	clc	__PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
	jnl	.Lsysc_nr_ok		# invalid svc number -> do svc 0
	lh	%r8,__PT_INT_CODE+2(%r11)	# load new svc number
	sla	%r8,2
	j	.Lsysc_nr_ok		# restart svc

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lsysc_notify_resume:
	lr	%r2,%r11		# pass pointer to pt_regs
	l	%r1,BASED(.Lc_do_notify_resume)
	la	%r14,BASED(.Lsysc_return)
	br	%r1			# call do_notify_resume

#
# _PIF_PER_TRAP is set, call do_per_trap
#
.Lsysc_singlestep:
	ni	__PT_FLAGS+3(%r11),255-_PIF_PER_TRAP
	lr	%r2,%r11		# pass pointer to pt_regs
	l	%r1,BASED(.Lc_do_per_trap)
	la	%r14,BASED(.Lsysc_return)
	br	%r1			# call do_per_trap

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
.Lsysc_tracesys:
	l	%r1,BASED(.Lc_trace_enter)
	lr	%r2,%r11		# pass pointer to pt_regs
	la	%r3,0
	xr	%r0,%r0
	icm	%r0,3,__PT_INT_CODE+2(%r11)
	st	%r0,__PT_R2(%r11)
	basr	%r14,%r1		# call do_syscall_trace_enter
	cl	%r2,BASED(.Lnr_syscalls)
	jnl	.Lsysc_tracenogo
	lr	%r8,%r2
	sll	%r8,2
	l	%r9,0(%r8,%r10)
.Lsysc_tracego:
	lm	%r3,%r7,__PT_R3(%r11)
	st	%r7,STACK_FRAME_OVERHEAD(%r15)
	l	%r2,__PT_ORIG_GPR2(%r11)
	basr	%r14,%r9		# call sys_xxx
	st	%r2,__PT_R2(%r11)	# store return value
.Lsysc_tracenogo:
	tm	__TI_flags+3(%r12),_TIF_TRACE
	jz	.Lsysc_return
	l	%r1,BASED(.Lc_trace_exit)
	lr	%r2,%r11		# pass pointer to pt_regs
	la	%r14,BASED(.Lsysc_return)
	br	%r1			# call do_syscall_trace_exit

#
# a new process exits the kernel with ret_from_fork
#
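# If the new task returns to user space (problem-state bit set in the saved
# psw) it continues on the normal system call exit path. Otherwise it is a
# kernel thread: %r9/%r10 are restored from pt_regs and hold the thread
# function and its argument (set up by copy_thread), which
# kernel_thread_starter then calls.
#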
ENTRY(ret_from_fork)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	l	%r12,__LC_THREAD_INFO
	l	%r13,__LC_SVC_NEW_PSW+4
	l	%r1,BASED(.Lc_schedule_tail)
	basr	%r14,%r1		# call schedule_tail
	TRACE_IRQS_ON
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
	jne	.Lsysc_tracenogo
	# it's a kernel thread
	lm	%r9,%r10,__PT_R9(%r11)	# load gprs
ENTRY(kernel_thread_starter)
	la	%r2,0(%r10)
	basr	%r14,%r9
	j	.Lsysc_tracenogo

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
	stpt	__LC_SYNC_ENTER_TIMER
	stm	%r8,%r15,__LC_SAVE_AREA_SYNC
	l	%r12,__LC_THREAD_INFO
	l	%r13,__LC_SVC_NEW_PSW+4
	lm	%r8,%r9,__LC_PGM_OLD_PSW
	tmh	%r8,0x0001		# test problem state bit
	jnz	1f			# -> fault in user space
	tmh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	0f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
0:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
	l	%r15,__LC_KERNEL_STACK
2:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stm	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
	stm	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
	mvc	__PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE
	xc	__PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jz	0f
	l	%r1,__TI_task(%r12)
	tmh	%r8,0x0001		# kernel per event ?
	jz	.Lpgm_kprobe
	oi	__PT_FLAGS+3(%r11),_PIF_PER_TRAP
	mvc	__THREAD_per_address(4,%r1),__LC_PER_ADDRESS
	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CODE
	mvc	__THREAD_per_paid(1,%r1),__LC_PER_ACCESS_ID
0:	REENABLE_IRQS
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	l	%r1,BASED(.Lc_jump_table)
	la	%r10,0x7f
	n	%r10,__PT_INT_CODE(%r11)
	je	.Lsysc_return
	sll	%r10,2
	l	%r1,0(%r10,%r1)		# load address of handler routine
	lr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# branch to interrupt-handler
	j	.Lsysc_return

#
# PER event in supervisor state, must be kprobes
#
.Lpgm_kprobe:
	REENABLE_IRQS
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	l	%r1,BASED(.Lc_do_per_trap)
	lr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# call do_per_trap
	j	.Lsysc_return

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
	mvc	__LC_RETURN_PSW+4(4),BASED(.Lc_sysc_per)
	lhi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
	lpsw	__LC_RETURN_PSW		# branch to .Lsysc_per and enable irqs

/*
 * IO interrupt handler routine
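 *
 * On LPAR (MACHINE_FLAG_LPAR) the handler loops with tpi and calls do_IRQ
 * again for each further pending I/O interruption before returning.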
 */

ENTRY(io_int_handler)
	stck	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	stm	%r8,%r15,__LC_SAVE_AREA_ASYNC
	l	%r12,__LC_THREAD_INFO
	l	%r13,__LC_SVC_NEW_PSW+4
	lm	%r8,%r9,__LC_IO_OLD_PSW
	tmh	%r8,0x0001		# interrupting from user ?
	jz	.Lio_skip
	UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
.Lio_skip:
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
	stm	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
	stm	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	xc	__PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
	l	%r1,BASED(.Lc_do_IRQ)
	lr	%r2,%r11		# pass pointer to pt_regs
	lhi	%r3,IO_INTERRUPT
	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
	jz	.Lio_call
	lhi	%r3,THIN_INTERRUPT
.Lio_call:
	basr	%r14,%r1		# call do_IRQ
	tm	__LC_MACHINE_FLAGS+2,0x10	# MACHINE_FLAG_LPAR
	jz	.Lio_return
	tpi	0
	jz	.Lio_return
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	j	.Lio_loop
.Lio_return:
	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON
.Lio_tif:
	tm	__TI_flags+3(%r12),_TIF_WORK
	jnz	.Lio_work		# there is work to do (signals etc.)
	tm	__LC_CPU_FLAGS+3,_CIF_WORK
	jnz	.Lio_work
.Lio_restore:
	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
	stpt	__LC_EXIT_TIMER
	lm	%r0,%r15,__PT_R0(%r11)
	lpsw	__LC_RETURN_PSW
.Lio_done:

#
# There is work to do, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
# 2) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
.Lio_work:
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jo	.Lio_work_user		# yes -> do resched & signal
#ifdef CONFIG_PREEMPT
	# check for preemptive scheduling
	icm	%r0,15,__TI_precount(%r12)
	jnz	.Lio_restore		# preemption disabled
	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
	jno	.Lio_restore
	# switch to kernel stack
	l	%r1,__PT_R15(%r11)
	ahi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lr	%r15,%r1
	# TRACE_IRQS_ON already done at .Lio_return, call
	# TRACE_IRQS_OFF to keep things symmetrical
	TRACE_IRQS_OFF
	l	%r1,BASED(.Lc_preempt_irq)
	basr	%r14,%r1		# call preempt_schedule_irq
	j	.Lio_return
#else
	j	.Lio_restore
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
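# The handlers called from .Lio_work_tif may enable interrupts and sleep,
# so they must not run on the per-cpu asynchronous stack; pt_regs is copied
# to the process kernel stack and %r11/%r15 are switched over first.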
#
.Lio_work_user:
	l	%r1,__LC_KERNEL_STACK
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lr	%r15,%r1

#
# One of the work bits is on. Find out which one.
#
.Lio_work_tif:
	tm	__LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
	jo	.Lio_mcck_pending
	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
	jo	.Lio_reschedule
	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
	jo	.Lio_sigpending
	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
	jo	.Lio_notify_resume
	tm	__LC_CPU_FLAGS+3,_CIF_ASCE
	jo	.Lio_uaccess
	j	.Lio_return		# beware of critical section cleanup

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lio_mcck_pending:
	# TRACE_IRQS_ON already done at .Lio_return
	l	%r1,BASED(.Lc_handle_mcck)
	basr	%r14,%r1		# TIF bit will be cleared by handler
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _CIF_ASCE is set, load user space asce
#
.Lio_uaccess:
	ni	__LC_CPU_FLAGS+3,255-_CIF_ASCE
	lctl	%c1,%c1,__LC_USER_ASCE	# load primary asce
	j	.Lio_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lio_reschedule:
	# TRACE_IRQS_ON already done at .Lio_return
	l	%r1,BASED(.Lc_schedule)
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	basr	%r14,%r1		# call scheduler
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lio_sigpending:
	# TRACE_IRQS_ON already done at .Lio_return
	l	%r1,BASED(.Lc_do_signal)
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# call do_signal
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lio_notify_resume:
	# TRACE_IRQS_ON already done at .Lio_return
	l	%r1,BASED(.Lc_do_notify_resume)
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# call do_notify_resume
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

/*
 * External interrupt handler routine
 */

ENTRY(ext_int_handler)
	stck	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	stm	%r8,%r15,__LC_SAVE_AREA_ASYNC
	l	%r12,__LC_THREAD_INFO
	l	%r13,__LC_SVC_NEW_PSW+4
	lm	%r8,%r9,__LC_EXT_OLD_PSW
	tmh	%r8,0x0001		# interrupting from user ?
	jz	.Lext_skip
	UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
.Lext_skip:
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
	stm	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
	stm	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
	xc	__PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
	TRACE_IRQS_OFF
	l	%r1,BASED(.Lc_do_IRQ)
	lr	%r2,%r11		# pass pointer to pt_regs
	lhi	%r3,EXT_INTERRUPT
	basr	%r14,%r1		# call do_IRQ
	j	.Lio_return

/*
 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
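 *
 * On entry %r2 is expected to point to the per-cpu idle data (the
 * __CLOCK_IDLE_ENTER/__TIMER_IDLE_ENTER slots are filled in here) and %r3
 * holds the mask for the wait psw. The psw is built in the empty slot of
 * the stack frame with its address part pointing behind the lpsw and the
 * 31-bit addressing mode bit set.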
 */
ENTRY(psw_idle)
	st	%r3,__SF_EMPTY(%r15)
	basr	%r1,0
	la	%r1,.Lpsw_idle_lpsw+4-.(%r1)
	st	%r1,__SF_EMPTY+4(%r15)
	oi	__SF_EMPTY+4(%r15),0x80
	stck	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
	lpsw	__SF_EMPTY(%r15)
	br	%r14
.Lpsw_idle_end:

.L__critical_end:

/*
 * Machine check handler routines
 */

ENTRY(mcck_int_handler)
	stck	__LC_MCCK_CLOCK
	spt	__LC_CPU_TIMER_SAVE_AREA	# revalidate cpu timer
	lm	%r0,%r15,__LC_GPREGS_SAVE_AREA	# revalidate gprs
	l	%r12,__LC_THREAD_INFO
	l	%r13,__LC_SVC_NEW_PSW+4
	lm	%r8,%r9,__LC_MCK_OLD_PSW
	tm	__LC_MCCK_CODE,0x80	# system damage?
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	la	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
	jo	3f
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	jl	0f
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
	jno	.Lmcck_panic		# no -> skip cleanup critical
	tmh	%r8,0x0001		# interrupting from user ?
	jz	.Lmcck_skip
	UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
.Lmcck_skip:
	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
	stm	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
	stm	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	l	%r1,BASED(.Lc_do_machine_check)
	lr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# call s390_do_machine_check
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lmcck_return
	l	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lr	%r15,%r1
	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
	tm	__LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
	jno	.Lmcck_return
	TRACE_IRQS_OFF
	l	%r1,BASED(.Lc_handle_mcck)
	basr	%r14,%r1		# call s390_handle_mcck
	TRACE_IRQS_ON
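#
# Return from the machine check handler. The cpu timer is stored into
# __LC_EXIT_TIMER only when returning to user space, matching the
# UPDATE_VTIME accounting done on the next kernel entry.
#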
.Lmcck_return:
	mvc	__LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11)	# move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01	# returning to user ?
	jno	0f
	lm	%r0,%r15,__PT_R0(%r11)
	stpt	__LC_EXIT_TIMER
	lpsw	__LC_RETURN_MCCK_PSW
0:	lm	%r0,%r15,__PT_R0(%r11)
	lpsw	__LC_RETURN_MCCK_PSW

.Lmcck_panic:
	l	%r14,__LC_PANIC_STACK
	slr	%r14,%r15
	sra	%r14,PAGE_SHIFT
	jz	0f
	l	%r15,__LC_PANIC_STACK
	j	.Lmcck_skip
0:	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	.Lmcck_skip

#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
	st	%r15,__LC_SAVE_AREA_RESTART
	l	%r15,__LC_RESTART_STACK
	ahi	%r15,-__PT_SIZE		# create pt_regs on stack
	xc	0(__PT_SIZE,%r15),0(%r15)
	stm	%r0,%r14,__PT_R0(%r15)
	mvc	__PT_R15(4,%r15),__LC_SAVE_AREA_RESTART
	mvc	__PT_PSW(8,%r15),__LC_RST_OLD_PSW	# store restart old psw
	ahi	%r15,-STACK_FRAME_OVERHEAD	# create stack frame on stack
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	l	%r1,__LC_RESTART_FN	# load fn, parm & source cpu
	l	%r2,__LC_RESTART_DATA
	l	%r3,__LC_RESTART_SOURCE
	ltr	%r3,%r3			# test source cpu address
	jm	1f			# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE	# sigp sense to source cpu
	brc	10,0b			# wait for status stored
1:	basr	%r14,%r1		# call function
	stap	__SF_EMPTY(%r15)	# store cpu address
	lh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP	# sigp stop to current cpu
	brc	2,2b
3:	j	3b

	.section .kprobes.text, "ax"

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
	l	%r15,__LC_PANIC_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stm	%r0,%r7,__PT_R0(%r11)
	stm	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(32,%r11),0(%r14)
	l	%r1,BASED(1f)
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	lr	%r2,%r11		# pass pointer to pt_regs
	br	%r1			# branch to kernel_stack_overflow
1:	.long	kernel_stack_overflow
#endif

.Lcleanup_table:
	.long	system_call + 0x80000000
	.long	.Lsysc_do_svc + 0x80000000
	.long	.Lsysc_tif + 0x80000000
	.long	.Lsysc_restore + 0x80000000
	.long	.Lsysc_done + 0x80000000
	.long	.Lio_tif + 0x80000000
	.long	.Lio_restore + 0x80000000
	.long	.Lio_done + 0x80000000
	.long	psw_idle + 0x80000000
	.long	.Lpsw_idle_end + 0x80000000

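#
# cleanup_critical is called via SWITCH_ASYNC when an asynchronous
# interrupt or machine check hits while the cpu is executing between
# .L__critical_start and .L__critical_end. %r9 holds the interrupted
# address; depending on which part of the entry/exit path was hit, the
# saved state is completed or rolled forward to a consistent point and %r9
# is replaced with the address at which to resume.
#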
cleanup_critical:
	cl	%r9,BASED(.Lcleanup_table)	# system_call
	jl	0f
	cl	%r9,BASED(.Lcleanup_table+4)	# .Lsysc_do_svc
	jl	.Lcleanup_system_call
	cl	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_tif
	jl	0f
	cl	%r9,BASED(.Lcleanup_table+12)	# .Lsysc_restore
	jl	.Lcleanup_sysc_tif
	cl	%r9,BASED(.Lcleanup_table+16)	# .Lsysc_done
	jl	.Lcleanup_sysc_restore
	cl	%r9,BASED(.Lcleanup_table+20)	# .Lio_tif
	jl	0f
	cl	%r9,BASED(.Lcleanup_table+24)	# .Lio_restore
	jl	.Lcleanup_io_tif
	cl	%r9,BASED(.Lcleanup_table+28)	# .Lio_done
	jl	.Lcleanup_io_restore
	cl	%r9,BASED(.Lcleanup_table+32)	# psw_idle
	jl	0f
	cl	%r9,BASED(.Lcleanup_table+36)	# .Lpsw_idle_end
	jl	.Lcleanup_idle
0:	br	%r14

.Lcleanup_system_call:
	# check if stpt has been executed
	cl	%r9,BASED(.Lcleanup_system_call_insn)
	jh	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
	chi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	# check if stm has been executed
	cl	%r9,BASED(.Lcleanup_system_call_insn+4)
	jh	0f
	mvc	__LC_SAVE_AREA_SYNC(32),0(%r11)
0:	# set up saved registers r12, and r13
	st	%r12,16(%r11)		# r12 thread-info pointer
	st	%r13,20(%r11)		# r13 literal-pool pointer
	# check if the user time calculation has been done
	cl	%r9,BASED(.Lcleanup_system_call_insn+8)
	jh	0f
	l	%r10,__LC_EXIT_TIMER
	l	%r15,__LC_EXIT_TIMER+4
	SUB64	%r10,%r15,__LC_SYNC_ENTER_TIMER
	ADD64	%r10,%r15,__LC_USER_TIMER
	st	%r10,__LC_USER_TIMER
	st	%r15,__LC_USER_TIMER+4
0:	# check if the system time calculation has been done
	cl	%r9,BASED(.Lcleanup_system_call_insn+12)
	jh	0f
	l	%r10,__LC_LAST_UPDATE_TIMER
	l	%r15,__LC_LAST_UPDATE_TIMER+4
	SUB64	%r10,%r15,__LC_EXIT_TIMER
	ADD64	%r10,%r15,__LC_SYSTEM_TIMER
	st	%r10,__LC_SYSTEM_TIMER
	st	%r15,__LC_SYSTEM_TIMER+4
0:	# update accounting time stamp
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	# set up saved register 11
	l	%r15,__LC_KERNEL_STACK
	la	%r9,STACK_FRAME_OVERHEAD(%r15)
	st	%r9,12(%r11)		# r11 pt_regs pointer
	# fill pt_regs
	mvc	__PT_R8(32,%r9),__LC_SAVE_AREA_SYNC
	stm	%r0,%r7,__PT_R0(%r9)
	mvc	__PT_PSW(8,%r9),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r9),__LC_SVC_ILC
	xc	__PT_FLAGS(4,%r9),__PT_FLAGS(%r9)
	mvi	__PT_FLAGS+3(%r9),_PIF_SYSCALL
	# setup saved register 15
	st	%r15,28(%r11)		# r15 stack pointer
	# set new psw address and exit
	l	%r9,BASED(.Lcleanup_table+4)	# .Lsysc_do_svc + 0x80000000
	br	%r14
.Lcleanup_system_call_insn:
	.long	system_call + 0x80000000
	.long	.Lsysc_stm + 0x80000000
	.long	.Lsysc_vtime + 0x80000000 + 36
	.long	.Lsysc_vtime + 0x80000000 + 76

.Lcleanup_sysc_tif:
	l	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_tif + 0x80000000
	br	%r14

.Lcleanup_sysc_restore:
	cl	%r9,BASED(.Lcleanup_sysc_restore_insn)
	jhe	0f
	l	%r9,12(%r11)		# get saved pointer to pt_regs
	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r9)
	mvc	0(32,%r11),__PT_R8(%r9)
	lm	%r0,%r7,__PT_R0(%r9)
0:	lm	%r8,%r9,__LC_RETURN_PSW
	br	%r14
.Lcleanup_sysc_restore_insn:
	.long	.Lsysc_done - 4 + 0x80000000

.Lcleanup_io_tif:
	l	%r9,BASED(.Lcleanup_table+20)	# .Lio_tif + 0x80000000
	br	%r14

.Lcleanup_io_restore:
	cl	%r9,BASED(.Lcleanup_io_restore_insn)
	jhe	0f
	l	%r9,12(%r11)		# get saved r11 pointer to pt_regs
	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r9)
	mvc	0(32,%r11),__PT_R8(%r9)
	lm	%r0,%r7,__PT_R0(%r9)
0:	lm	%r8,%r9,__LC_RETURN_PSW
	br	%r14
.Lcleanup_io_restore_insn:
	.long	.Lio_done - 4 + 0x80000000

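#
# An interrupt that hits inside psw_idle ends up here: the idle exit clock
# and timer are taken from the interrupt entry values, steal and system
# time are accounted, and %r9 is set to the saved return address of
# psw_idle (the caller's %r14) with the wait and irq bits cleared from the
# psw in %r8.
#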
.Lcleanup_idle:
	# copy interrupt clock & cpu timer
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
	chi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0:	# check if stck has been executed
	cl	%r9,BASED(.Lcleanup_idle_insn)
	jhe	1f
	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
1:	# account system time going idle
	lm	%r9,%r10,__LC_STEAL_TIMER
	ADD64	%r9,%r10,__CLOCK_IDLE_ENTER(%r2)
	SUB64	%r9,%r10,__LC_LAST_UPDATE_CLOCK
	stm	%r9,%r10,__LC_STEAL_TIMER
	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
	lm	%r9,%r10,__LC_SYSTEM_TIMER
	ADD64	%r9,%r10,__LC_LAST_UPDATE_TIMER
	SUB64	%r9,%r10,__TIMER_IDLE_ENTER(%r2)
	stm	%r9,%r10,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
	# prepare return psw
	n	%r8,BASED(.Lcleanup_idle_wait)	# clear irq & wait state bits
	l	%r9,24(%r11)		# return from psw_idle
	br	%r14
.Lcleanup_idle_insn:
	.long	.Lpsw_idle_lpsw + 0x80000000
.Lcleanup_idle_wait:
	.long	0xfcfdffff

/*
 * Integer constants
 */
	.align	4
.Lnr_syscalls:
	.long	NR_syscalls
.Lvtimer_max:
	.quad	0x7fffffffffffffff

/*
 * Symbol constants
 */
.Lc_do_machine_check:	.long	s390_do_machine_check
.Lc_handle_mcck:	.long	s390_handle_mcck
.Lc_do_IRQ:		.long	do_IRQ
.Lc_do_signal:		.long	do_signal
.Lc_do_notify_resume:	.long	do_notify_resume
.Lc_do_per_trap:	.long	do_per_trap
.Lc_jump_table:		.long	pgm_check_table
.Lc_schedule:		.long	schedule
#ifdef CONFIG_PREEMPT
.Lc_preempt_irq:	.long	preempt_schedule_irq
#endif
.Lc_trace_enter:	.long	do_syscall_trace_enter
.Lc_trace_exit:		.long	do_syscall_trace_exit
.Lc_schedule_tail:	.long	schedule_tail
.Lc_sysc_per:		.long	.Lsysc_per + 0x80000000
#ifdef CONFIG_TRACE_IRQFLAGS
.Lc_hardirqs_on:	.long	trace_hardirqs_on_caller
.Lc_hardirqs_off:	.long	trace_hardirqs_off_caller
#endif
#ifdef CONFIG_LOCKDEP
.Lc_lockdep_sys_exit:	.long	lockdep_sys_exit
#endif
.Lc_critical_start:	.long	.L__critical_start + 0x80000000
.Lc_critical_length:	.long	.L__critical_end - .L__critical_start

	.section .rodata, "a"
#define SYSCALL(esa,esame,emu)	.long esa
	.globl	sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL