/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	.macro	arch_ret_to_user, tmp1, tmp2
	.endm
#endif

#include "entry-header.S"


	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on
#endif

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	tst	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	tst	r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
	beq	no_work_pending
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
	movne	why, #0				@ prevent further restarts
	bl	do_notify_resume
	b	ret_slow_syscall		@ Check work again

work_resched:
	bl	schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on
#endif
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
	mov	why, #1
	tst	r1, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	beq	ret_slow_syscall
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
#undef CALL
#define CALL(x) .long x

#ifdef CONFIG_FUNCTION_TRACER
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
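 * In such a frame, fp ends up pointing at the saved pc word, so the
 * function's original lr can be read back from [fp, #-4]; the
 * CONFIG_OLD_MCOUNT mcount helpers below rely on exactly that slot.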
 *
 *	mov	ip, sp
 *	push	{fp, ip, lr, pc}
 *	sub	fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 *
 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
 * arch/arm/kernel/ftrace.c).
 */

#ifndef CONFIG_OLD_MCOUNT
#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
#endif
#endif

.macro mcount_adjust_addr rd, rn
	bic	\rd, \rn, #1		@ clear the Thumb bit if present
	sub	\rd, \rd, #MCOUNT_INSN_SIZE
.endm

.macro __mcount suffix
	mcount_enter
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, .Lftrace_stub
	cmp	r0, r2
	bne	1f

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr	r1, =ftrace_graph_return
	ldr	r2, [r1]
	cmp	r0, r2
	bne	ftrace_graph_caller\suffix

	ldr	r1, =ftrace_graph_entry
	ldr	r2, [r1]
	ldr	r0, =ftrace_graph_entry_stub
	cmp	r0, r2
	bne	ftrace_graph_caller\suffix
#endif

	mcount_exit

1:	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function
	adr	lr, BSYM(2f)
	mov	pc, r2
2:	mcount_exit
.endm

.macro __ftrace_caller suffix
	mcount_enter

	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function

	.globl ftrace_call\suffix
ftrace_call\suffix:
	bl	ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
	mov	r0, r0
#endif

	mcount_exit
.endm

.macro __ftrace_graph_caller
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ called from __ftrace_caller, saved in mcount_enter
	ldr	r1, [sp, #16]		@ instrumented routine (func)
	mcount_adjust_addr	r1, r1
#else
	@ called from __mcount, untouched in lr
	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
#endif
	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return
	mcount_exit
.endm

#ifdef CONFIG_OLD_MCOUNT
/*
 * mcount
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [fp, #-4]
.endm

.macro mcount_exit
	ldr	lr, [fp, #-4]
	ldmia	sp!, {r0-r3, pc}
.endm

ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
	stmdb	sp!, {lr}
	ldr	lr, [fp, #-4]
	ldmia	sp!, {pc}
#else
	__mcount _old
#endif
ENDPROC(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
	__ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif

/*
 * __gnu_mcount_nc
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	@ the call site did "push {lr}"; that word now sits above the
	@ five registers saved by mcount_enter, i.e. at sp + 20
	ldr	\reg, [sp, #20]
.endm

.macro mcount_exit
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip
.endm

ENTRY(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
	mov	ip, lr
	ldmia	sp!, {lr}
	mov	pc, ip
#else
	__mcount
#endif
ENDPROC(__gnu_mcount_nc)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
	__ftrace_caller
ENDPROC(ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl return_to_handler
return_to_handler:
	stmdb	sp!, {r0-r3}
	mov	r0, fp			@ frame pointer
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	mov	pc, lr
#endif

ENTRY(ftrace_stub)
.Lftrace_stub:
	mov	pc, lr
ENDPROC(ftrace_stub)

#endif /* CONFIG_FUNCTION_TRACER */

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
	ldreq	r10, [lr, #-4]			@ get SWI instruction
#else
	ldr	r10, [lr, #-4]			@ get SWI instruction
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]

#else
	/* Legacy ABI only. */
	ldr	scno, [lr, #-4]			@ get SWI instruction
#endif

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq

	get_thread_info tsk
	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

#ifdef CONFIG_SECCOMP
	tst	r10, #_TIF_SECCOMP
	beq	1f
	mov	r0, scno
	bl	__secure_computing
	add	r0, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmia	r0, {r0 - r3}			@ have to reload r0 - r3
1:
#endif

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, BSYM(ret_fast_syscall)	@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func
ENDPROC(vector_swi)

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r2, scno
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	bl	syscall_trace

	adr	lr, BSYM(__sys_trace_return)	@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r2, scno
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
	stmloia	sp, {r5, r6}			@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_fork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_fork
ENDPROC(sys_fork_wrapper)

sys_vfork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_vfork
ENDPROC(sys_vfork_wrapper)

sys_execve_wrapper:
	add	r3, sp, #S_OFF
	b	sys_execve
ENDPROC(sys_execve_wrapper)

sys_clone_wrapper:
	add	ip, sp, #S_OFF
	str	ip, [sp, #4]
	b	sys_clone
ENDPROC(sys_clone_wrapper)

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0				@ prevent syscall restart handling
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0				@ prevent syscall restart handling
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_sigaltstack_wrapper:
	ldr	r2, [sp, #S_OFF + S_SP]
	b	do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)

sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
	tst	r5, #PGOFF_MASK
	moveq	r5, r5, lsr #PAGE_SHIFT - 12
	streq	r5, [sp, #4]
	beq	sys_mmap_pgoff
	mov	r0, #-EINVAL
	mov	pc, lr
#else
	str	r5, [sp, #4]
	b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

sys_oabi_pread64:
	stmia	sp, {r3, r4}
	b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]
	mov	r3, r2
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif