/*
 * linux/arch/arm/kernel/entry-common.S
 *
 * Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	.macro	arch_ret_to_user, tmp1, tmp2
	.endm
#endif

#include "entry-header.S"


	.align	5
/*
 * This is the fast syscall return path. We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on
#endif

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	tst	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	/*
	 * TIF_SIGPENDING or TIF_NOTIFY_RESUME must've been set if we got here
	 */
	ldr	r2, [sp, #S_PSR]
	mov	r0, sp				@ 'regs'
	tst	r2, #15				@ are we returning to user mode?
	bne	no_work_pending			@ no? just leave, then...
	mov	r2, why				@ 'syscall'
	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
	movne	why, #0				@ prevent further restarts
	bl	do_notify_resume
	b	ret_slow_syscall		@ Check work again

work_resched:
	bl	schedule
/*
 * "slow" syscall return path. "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on
#endif
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
	mov	why, #1
	tst	r1, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	beq	ret_slow_syscall
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
#undef CALL
#define CALL(x) .long x

#ifdef CONFIG_FUNCTION_TRACER
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function. In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *	mov	ip, sp
 *	push	{fp, ip, lr, pc}
 *	sub	fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call. As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered. (In the __gnu_mcount_nc implementation, we
 * clobber the ip register. This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 *
 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
 * arch/arm/kernel/ftrace.c).
 */

#ifndef CONFIG_OLD_MCOUNT
#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
#endif
#endif

.macro mcount_adjust_addr rd, rn
	bic	\rd, \rn, #1		@ clear the Thumb bit if present
	sub	\rd, \rd, #MCOUNT_INSN_SIZE
.endm

.macro __mcount suffix
	mcount_enter
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, .Lftrace_stub
	cmp	r0, r2
	bne	1f

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr	r1, =ftrace_graph_return
	ldr	r2, [r1]
	cmp	r0, r2
	bne	ftrace_graph_caller\suffix

	ldr	r1, =ftrace_graph_entry
	ldr	r2, [r1]
	ldr	r0, =ftrace_graph_entry_stub
	cmp	r0, r2
	bne	ftrace_graph_caller\suffix
#endif

	mcount_exit

1:	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function
	adr	lr, BSYM(2f)
	mov	pc, r2
2:	mcount_exit
.endm

.macro __ftrace_caller suffix
	mcount_enter

	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function

	.globl ftrace_call\suffix
ftrace_call\suffix:
	bl	ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
	mov	r0, r0
#endif

	mcount_exit
.endm

.macro __ftrace_graph_caller
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ called from __ftrace_caller, saved in mcount_enter
	ldr	r1, [sp, #16]		@ instrumented routine (func)
	mcount_adjust_addr	r1, r1
#else
	@ called from __mcount, untouched in lr
	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
#endif
	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return
	mcount_exit
.endm

#ifdef CONFIG_OLD_MCOUNT
/*
 * mcount
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [fp, #-4]
.endm

.macro mcount_exit
	ldr	lr, [fp, #-4]
	ldmia	sp!, {r0-r3, pc}
.endm

ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
	stmdb	sp!, {lr}
	ldr	lr, [fp, #-4]
	ldmia	sp!, {pc}
#else
	__mcount _old
#endif
ENDPROC(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
	__ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif

/*
 * __gnu_mcount_nc
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [sp, #20]
.endm

.macro mcount_exit
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip
.endm

ENTRY(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
	mov	ip, lr
	ldmia	sp!, {lr}
	mov	pc, ip
#else
	__mcount
#endif
ENDPROC(__gnu_mcount_nc)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
	__ftrace_caller
ENDPROC(ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl return_to_handler
return_to_handler:
	stmdb	sp!, {r0-r3}
	mov	r0, fp			@ frame pointer
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	mov	pc, lr
#endif

ENTRY(ftrace_stub)
.Lftrace_stub:
	mov	pc, lr
ENDPROC(ftrace_stub)

#endif /* CONFIG_FUNCTION_TRACER */

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
	ldreq	r10, [lr, #-4]			@ get SWI instruction
#else
	ldr	r10, [lr, #-4]			@ get SWI instruction
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]

#else
	/* Legacy ABI only. */
	ldr	scno, [lr, #-4]			@ get SWI instruction
#endif

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq

	get_thread_info tsk
	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

#ifdef CONFIG_SECCOMP
	tst	r10, #_TIF_SECCOMP
	beq	1f
	mov	r0, scno
	bl	__secure_computing
	add	r0, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmia	r0, {r0 - r3}			@ have to reload r0 - r3
1:
#endif

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, BSYM(ret_fast_syscall)	@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func
ENDPROC(vector_swi)

	/*
	 * This is the really slow path. We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r2, scno
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	bl	syscall_trace

	adr	lr, BSYM(__sys_trace_return)	@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r2, scno
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
	stmloia	sp, {r5, r6}			@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_fork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_fork
ENDPROC(sys_fork_wrapper)

sys_vfork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_vfork
ENDPROC(sys_vfork_wrapper)

sys_execve_wrapper:
	add	r3, sp, #S_OFF
	b	sys_execve
ENDPROC(sys_execve_wrapper)

sys_clone_wrapper:
	add	ip, sp, #S_OFF
	str	ip, [sp, #4]
	b	sys_clone
ENDPROC(sys_clone_wrapper)

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_sigaltstack_wrapper:
	ldr	r2, [sp, #S_OFF + S_SP]
	b	do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)

sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always in units of 4K. If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
	tst	r5, #PGOFF_MASK
	moveq	r5, r5, lsr #PAGE_SHIFT - 12
	streq	r5, [sp, #4]
	beq	sys_mmap_pgoff
	mov	r0, #-EINVAL
	mov	pc, lr
#else
	str	r5, [sp, #4]
	b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

sys_oabi_pread64:
	stmia	sp, {r3, r4}
	b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]
	mov	r3, r2
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif