1/* 2 * linux/arch/arm/kernel/entry-common.S 3 * 4 * Copyright (C) 2000 Russell King 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 11#include <asm/unistd.h> 12#include <asm/ftrace.h> 13#include <asm/unwind.h> 14 15#ifdef CONFIG_NEED_RET_TO_USER 16#include <mach/entry-macro.S> 17#else 18 .macro arch_ret_to_user, tmp1, tmp2 19 .endm 20#endif 21 22#include "entry-header.S" 23 24 25 .align 5 26/* 27 * This is the fast syscall return path. We do as little as 28 * possible here, and this includes saving r0 back into the SVC 29 * stack. 30 */ 31ret_fast_syscall: 32 UNWIND(.fnstart ) 33 UNWIND(.cantunwind ) 34 disable_irq @ disable interrupts 35 ldr r1, [tsk, #TI_FLAGS] 36 tst r1, #_TIF_WORK_MASK 37 bne fast_work_pending 38#if defined(CONFIG_IRQSOFF_TRACER) 39 asm_trace_hardirqs_on 40#endif 41 42 /* perform architecture specific actions before user return */ 43 arch_ret_to_user r1, lr 44 45 restore_user_regs fast = 1, offset = S_OFF 46 UNWIND(.fnend ) 47 48/* 49 * Ok, we need to do extra processing, enter the slow path. 50 */ 51fast_work_pending: 52 str r0, [sp, #S_R0+S_OFF]! @ returned r0 53work_pending: 54 mov r0, sp @ 'regs' 55 mov r2, why @ 'syscall' 56 bl do_work_pending 57 cmp r0, #0 58 beq no_work_pending 59 movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE) 60 ldmia sp, {r0 - r6} @ have to reload r0 - r6 61 b local_restart @ ... and off we go 62 63/* 64 * "slow" syscall return path. "why" tells us if this was a real syscall. 65 */ 66ENTRY(ret_to_user) 67ret_slow_syscall: 68 disable_irq @ disable interrupts 69ENTRY(ret_to_user_from_irq) 70 ldr r1, [tsk, #TI_FLAGS] 71 tst r1, #_TIF_WORK_MASK 72 bne work_pending 73no_work_pending: 74#if defined(CONFIG_IRQSOFF_TRACER) 75 asm_trace_hardirqs_on 76#endif 77 /* perform architecture specific actions before user return */ 78 arch_ret_to_user r1, lr 79 80 restore_user_regs fast = 0, offset = 0 81ENDPROC(ret_to_user_from_irq) 82ENDPROC(ret_to_user) 83 84/* 85 * This is how we return from a fork. 86 */ 87ENTRY(ret_from_fork) 88 bl schedule_tail 89 get_thread_info tsk 90 mov why, #1 91 b ret_slow_syscall 92ENDPROC(ret_from_fork) 93 94 .equ NR_syscalls,0 95#define CALL(x) .equ NR_syscalls,NR_syscalls+1 96#include "calls.S" 97#undef CALL 98#define CALL(x) .long x 99 100#ifdef CONFIG_FUNCTION_TRACER 101/* 102 * When compiling with -pg, gcc inserts a call to the mcount routine at the 103 * start of every function. In mcount, apart from the function's address (in 104 * lr), we need to get hold of the function's caller's address. 105 * 106 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this: 107 * 108 * bl mcount 109 * 110 * These versions have the limitation that in order for the mcount routine to 111 * be able to determine the function's caller's address, an APCS-style frame 112 * pointer (which is set up with something like the code below) is required. 113 * 114 * mov ip, sp 115 * push {fp, ip, lr, pc} 116 * sub fp, ip, #4 117 * 118 * With EABI, these frame pointers are not available unless -mapcs-frame is 119 * specified, and if building as Thumb-2, not even then. 120 * 121 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount, 122 * with call sites like: 123 * 124 * push {lr} 125 * bl __gnu_mcount_nc 126 * 127 * With these compilers, frame pointers are not necessary. 128 * 129 * mcount can be thought of as a function called in the middle of a subroutine 130 * call. As such, it needs to be transparent for both the caller and the 131 * callee: the original lr needs to be restored when leaving mcount, and no 132 * registers should be clobbered. (In the __gnu_mcount_nc implementation, we 133 * clobber the ip register. This is OK because the ARM calling convention 134 * allows it to be clobbered in subroutines and doesn't use it to hold 135 * parameters.) 136 * 137 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0" 138 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see 139 * arch/arm/kernel/ftrace.c). 140 */ 141 142#ifndef CONFIG_OLD_MCOUNT 143#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4)) 144#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0. 145#endif 146#endif 147 148.macro mcount_adjust_addr rd, rn 149 bic \rd, \rn, #1 @ clear the Thumb bit if present 150 sub \rd, \rd, #MCOUNT_INSN_SIZE 151.endm 152 153.macro __mcount suffix 154 mcount_enter 155 ldr r0, =ftrace_trace_function 156 ldr r2, [r0] 157 adr r0, .Lftrace_stub 158 cmp r0, r2 159 bne 1f 160 161#ifdef CONFIG_FUNCTION_GRAPH_TRACER 162 ldr r1, =ftrace_graph_return 163 ldr r2, [r1] 164 cmp r0, r2 165 bne ftrace_graph_caller\suffix 166 167 ldr r1, =ftrace_graph_entry 168 ldr r2, [r1] 169 ldr r0, =ftrace_graph_entry_stub 170 cmp r0, r2 171 bne ftrace_graph_caller\suffix 172#endif 173 174 mcount_exit 175 1761: mcount_get_lr r1 @ lr of instrumented func 177 mcount_adjust_addr r0, lr @ instrumented function 178 adr lr, BSYM(2f) 179 mov pc, r2 1802: mcount_exit 181.endm 182 183.macro __ftrace_caller suffix 184 mcount_enter 185 186 mcount_get_lr r1 @ lr of instrumented func 187 mcount_adjust_addr r0, lr @ instrumented function 188 189 .globl ftrace_call\suffix 190ftrace_call\suffix: 191 bl ftrace_stub 192 193#ifdef CONFIG_FUNCTION_GRAPH_TRACER 194 .globl ftrace_graph_call\suffix 195ftrace_graph_call\suffix: 196 mov r0, r0 197#endif 198 199 mcount_exit 200.endm 201 202.macro __ftrace_graph_caller 203 sub r0, fp, #4 @ &lr of instrumented routine (&parent) 204#ifdef CONFIG_DYNAMIC_FTRACE 205 @ called from __ftrace_caller, saved in mcount_enter 206 ldr r1, [sp, #16] @ instrumented routine (func) 207 mcount_adjust_addr r1, r1 208#else 209 @ called from __mcount, untouched in lr 210 mcount_adjust_addr r1, lr @ instrumented routine (func) 211#endif 212 mov r2, fp @ frame pointer 213 bl prepare_ftrace_return 214 mcount_exit 215.endm 216 217#ifdef CONFIG_OLD_MCOUNT 218/* 219 * mcount 220 */ 221 222.macro mcount_enter 223 stmdb sp!, {r0-r3, lr} 224.endm 225 226.macro mcount_get_lr reg 227 ldr \reg, [fp, #-4] 228.endm 229 230.macro mcount_exit 231 ldr lr, [fp, #-4] 232 ldmia sp!, {r0-r3, pc} 233.endm 234 235ENTRY(mcount) 236#ifdef CONFIG_DYNAMIC_FTRACE 237 stmdb sp!, {lr} 238 ldr lr, [fp, #-4] 239 ldmia sp!, {pc} 240#else 241 __mcount _old 242#endif 243ENDPROC(mcount) 244 245#ifdef CONFIG_DYNAMIC_FTRACE 246ENTRY(ftrace_caller_old) 247 __ftrace_caller _old 248ENDPROC(ftrace_caller_old) 249#endif 250 251#ifdef CONFIG_FUNCTION_GRAPH_TRACER 252ENTRY(ftrace_graph_caller_old) 253 __ftrace_graph_caller 254ENDPROC(ftrace_graph_caller_old) 255#endif 256 257.purgem mcount_enter 258.purgem mcount_get_lr 259.purgem mcount_exit 260#endif 261 262/* 263 * __gnu_mcount_nc 264 */ 265 266.macro mcount_enter 267 stmdb sp!, {r0-r3, lr} 268.endm 269 270.macro mcount_get_lr reg 271 ldr \reg, [sp, #20] 272.endm 273 274.macro mcount_exit 275 ldmia sp!, {r0-r3, ip, lr} 276 mov pc, ip 277.endm 278 279ENTRY(__gnu_mcount_nc) 280#ifdef CONFIG_DYNAMIC_FTRACE 281 mov ip, lr 282 ldmia sp!, {lr} 283 mov pc, ip 284#else 285 __mcount 286#endif 287ENDPROC(__gnu_mcount_nc) 288 289#ifdef CONFIG_DYNAMIC_FTRACE 290ENTRY(ftrace_caller) 291 __ftrace_caller 292ENDPROC(ftrace_caller) 293#endif 294 295#ifdef CONFIG_FUNCTION_GRAPH_TRACER 296ENTRY(ftrace_graph_caller) 297 __ftrace_graph_caller 298ENDPROC(ftrace_graph_caller) 299#endif 300 301.purgem mcount_enter 302.purgem mcount_get_lr 303.purgem mcount_exit 304 305#ifdef CONFIG_FUNCTION_GRAPH_TRACER 306 .globl return_to_handler 307return_to_handler: 308 stmdb sp!, {r0-r3} 309 mov r0, fp @ frame pointer 310 bl ftrace_return_to_handler 311 mov lr, r0 @ r0 has real ret addr 312 ldmia sp!, {r0-r3} 313 mov pc, lr 314#endif 315 316ENTRY(ftrace_stub) 317.Lftrace_stub: 318 mov pc, lr 319ENDPROC(ftrace_stub) 320 321#endif /* CONFIG_FUNCTION_TRACER */ 322 323/*============================================================================= 324 * SWI handler 325 *----------------------------------------------------------------------------- 326 */ 327 328 .align 5 329ENTRY(vector_swi) 330 sub sp, sp, #S_FRAME_SIZE 331 stmia sp, {r0 - r12} @ Calling r0 - r12 332 ARM( add r8, sp, #S_PC ) 333 ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr 334 THUMB( mov r8, sp ) 335 THUMB( store_user_sp_lr r8, r10, S_SP ) @ calling sp, lr 336 mrs r8, spsr @ called from non-FIQ mode, so ok. 337 str lr, [sp, #S_PC] @ Save calling PC 338 str r8, [sp, #S_PSR] @ Save CPSR 339 str r0, [sp, #S_OLD_R0] @ Save OLD_R0 340 zero_fp 341 342 /* 343 * Get the system call number. 344 */ 345 346#if defined(CONFIG_OABI_COMPAT) 347 348 /* 349 * If we have CONFIG_OABI_COMPAT then we need to look at the swi 350 * value to determine if it is an EABI or an old ABI call. 351 */ 352#ifdef CONFIG_ARM_THUMB 353 tst r8, #PSR_T_BIT 354 movne r10, #0 @ no thumb OABI emulation 355 ldreq r10, [lr, #-4] @ get SWI instruction 356#else 357 ldr r10, [lr, #-4] @ get SWI instruction 358#endif 359#ifdef CONFIG_CPU_ENDIAN_BE8 360 rev r10, r10 @ little endian instruction 361#endif 362 363#elif defined(CONFIG_AEABI) 364 365 /* 366 * Pure EABI user space always put syscall number into scno (r7). 367 */ 368#elif defined(CONFIG_ARM_THUMB) 369 /* Legacy ABI only, possibly thumb mode. */ 370 tst r8, #PSR_T_BIT @ this is SPSR from save_user_regs 371 addne scno, r7, #__NR_SYSCALL_BASE @ put OS number in 372 ldreq scno, [lr, #-4] 373 374#else 375 /* Legacy ABI only. */ 376 ldr scno, [lr, #-4] @ get SWI instruction 377#endif 378 379#ifdef CONFIG_ALIGNMENT_TRAP 380 ldr ip, __cr_alignment 381 ldr ip, [ip] 382 mcr p15, 0, ip, c1, c0 @ update control register 383#endif 384 enable_irq 385 386 get_thread_info tsk 387 adr tbl, sys_call_table @ load syscall table pointer 388 389#if defined(CONFIG_OABI_COMPAT) 390 /* 391 * If the swi argument is zero, this is an EABI call and we do nothing. 392 * 393 * If this is an old ABI call, get the syscall number into scno and 394 * get the old ABI syscall table address. 395 */ 396 bics r10, r10, #0xff000000 397 eorne scno, r10, #__NR_OABI_SYSCALL_BASE 398 ldrne tbl, =sys_oabi_call_table 399#elif !defined(CONFIG_AEABI) 400 bic scno, scno, #0xff000000 @ mask off SWI op-code 401 eor scno, scno, #__NR_SYSCALL_BASE @ check OS number 402#endif 403 404local_restart: 405 ldr r10, [tsk, #TI_FLAGS] @ check for syscall tracing 406 stmdb sp!, {r4, r5} @ push fifth and sixth args 407 408#ifdef CONFIG_SECCOMP 409 tst r10, #_TIF_SECCOMP 410 beq 1f 411 mov r0, scno 412 bl __secure_computing 413 add r0, sp, #S_R0 + S_OFF @ pointer to regs 414 ldmia r0, {r0 - r3} @ have to reload r0 - r3 4151: 416#endif 417 418 tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls? 419 bne __sys_trace 420 421 cmp scno, #NR_syscalls @ check upper syscall limit 422 adr lr, BSYM(ret_fast_syscall) @ return address 423 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine 424 425 add r1, sp, #S_OFF 4262: mov why, #0 @ no longer a real syscall 427 cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE) 428 eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back 429 bcs arm_syscall 430 b sys_ni_syscall @ not private func 431ENDPROC(vector_swi) 432 433 /* 434 * This is the really slow path. We're going to be doing 435 * context switches, and waiting for our parent to respond. 436 */ 437__sys_trace: 438 mov r1, scno 439 add r0, sp, #S_OFF 440 bl syscall_trace_enter 441 442 adr lr, BSYM(__sys_trace_return) @ return address 443 mov scno, r0 @ syscall number (possibly new) 444 add r1, sp, #S_R0 + S_OFF @ pointer to regs 445 cmp scno, #NR_syscalls @ check upper syscall limit 446 ldmccia r1, {r0 - r6} @ have to reload r0 - r6 447 stmccia sp, {r4, r5} @ and update the stack args 448 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine 449 b 2b 450 451__sys_trace_return: 452 str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 453 mov r1, scno 454 mov r0, sp 455 bl syscall_trace_exit 456 b ret_slow_syscall 457 458 .align 5 459#ifdef CONFIG_ALIGNMENT_TRAP 460 .type __cr_alignment, #object 461__cr_alignment: 462 .word cr_alignment 463#endif 464 .ltorg 465 466/* 467 * This is the syscall table declaration for native ABI syscalls. 468 * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall. 469 */ 470#define ABI(native, compat) native 471#ifdef CONFIG_AEABI 472#define OBSOLETE(syscall) sys_ni_syscall 473#else 474#define OBSOLETE(syscall) syscall 475#endif 476 477 .type sys_call_table, #object 478ENTRY(sys_call_table) 479#include "calls.S" 480#undef ABI 481#undef OBSOLETE 482 483/*============================================================================ 484 * Special system call wrappers 485 */ 486@ r0 = syscall number 487@ r8 = syscall table 488sys_syscall: 489 bic scno, r0, #__NR_OABI_SYSCALL_BASE 490 cmp scno, #__NR_syscall - __NR_SYSCALL_BASE 491 cmpne scno, #NR_syscalls @ check range 492 stmloia sp, {r5, r6} @ shuffle args 493 movlo r0, r1 494 movlo r1, r2 495 movlo r2, r3 496 movlo r3, r4 497 ldrlo pc, [tbl, scno, lsl #2] 498 b sys_ni_syscall 499ENDPROC(sys_syscall) 500 501sys_fork_wrapper: 502 add r0, sp, #S_OFF 503 b sys_fork 504ENDPROC(sys_fork_wrapper) 505 506sys_vfork_wrapper: 507 add r0, sp, #S_OFF 508 b sys_vfork 509ENDPROC(sys_vfork_wrapper) 510 511sys_execve_wrapper: 512 add r3, sp, #S_OFF 513 b sys_execve 514ENDPROC(sys_execve_wrapper) 515 516sys_clone_wrapper: 517 add ip, sp, #S_OFF 518 str ip, [sp, #4] 519 b sys_clone 520ENDPROC(sys_clone_wrapper) 521 522sys_sigreturn_wrapper: 523 add r0, sp, #S_OFF 524 mov why, #0 @ prevent syscall restart handling 525 b sys_sigreturn 526ENDPROC(sys_sigreturn_wrapper) 527 528sys_rt_sigreturn_wrapper: 529 add r0, sp, #S_OFF 530 mov why, #0 @ prevent syscall restart handling 531 b sys_rt_sigreturn 532ENDPROC(sys_rt_sigreturn_wrapper) 533 534sys_sigaltstack_wrapper: 535 ldr r2, [sp, #S_OFF + S_SP] 536 b do_sigaltstack 537ENDPROC(sys_sigaltstack_wrapper) 538 539sys_statfs64_wrapper: 540 teq r1, #88 541 moveq r1, #84 542 b sys_statfs64 543ENDPROC(sys_statfs64_wrapper) 544 545sys_fstatfs64_wrapper: 546 teq r1, #88 547 moveq r1, #84 548 b sys_fstatfs64 549ENDPROC(sys_fstatfs64_wrapper) 550 551/* 552 * Note: off_4k (r5) is always units of 4K. If we can't do the requested 553 * offset, we return EINVAL. 554 */ 555sys_mmap2: 556#if PAGE_SHIFT > 12 557 tst r5, #PGOFF_MASK 558 moveq r5, r5, lsr #PAGE_SHIFT - 12 559 streq r5, [sp, #4] 560 beq sys_mmap_pgoff 561 mov r0, #-EINVAL 562 mov pc, lr 563#else 564 str r5, [sp, #4] 565 b sys_mmap_pgoff 566#endif 567ENDPROC(sys_mmap2) 568 569#ifdef CONFIG_OABI_COMPAT 570 571/* 572 * These are syscalls with argument register differences 573 */ 574 575sys_oabi_pread64: 576 stmia sp, {r3, r4} 577 b sys_pread64 578ENDPROC(sys_oabi_pread64) 579 580sys_oabi_pwrite64: 581 stmia sp, {r3, r4} 582 b sys_pwrite64 583ENDPROC(sys_oabi_pwrite64) 584 585sys_oabi_truncate64: 586 mov r3, r2 587 mov r2, r1 588 b sys_truncate64 589ENDPROC(sys_oabi_truncate64) 590 591sys_oabi_ftruncate64: 592 mov r3, r2 593 mov r2, r1 594 b sys_ftruncate64 595ENDPROC(sys_oabi_ftruncate64) 596 597sys_oabi_readahead: 598 str r3, [sp] 599 mov r3, r2 600 mov r2, r1 601 b sys_readahead 602ENDPROC(sys_oabi_readahead) 603 604/* 605 * Let's declare a second syscall table for old ABI binaries 606 * using the compatibility syscall entries. 607 */ 608#define ABI(native, compat) compat 609#define OBSOLETE(syscall) syscall 610 611 .type sys_oabi_call_table, #object 612ENTRY(sys_oabi_call_table) 613#include "calls.S" 614#undef ABI 615#undef OBSOLETE 616 617#endif 618 619