/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <mach/entry-macro.S>
#include <asm/unwind.h>

#include "entry-header.S"


	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	tst	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	tst	r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
	beq	no_work_pending
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_notify_resume
	b	ret_slow_syscall		@ Check work again

work_resched:
	bl	schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
	mov	why, #1
	tst	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	beq	ret_slow_syscall
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

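/*
 * Work out NR_syscalls by including calls.S once with CALL(x) defined
 * to bump the counter for every entry; CALL(x) is then redefined to
 * emit each handler address as a .long, which is what the syscall
 * table declarations further down rely on when they re-include calls.S.
 */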
	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
#undef CALL
#define CALL(x) .long x

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
	stmdb	sp!, {r0-r3, lr}
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE

	.globl mcount_call
mcount_call:
	bl	ftrace_stub
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}

ENTRY(ftrace_caller)
	stmdb	sp!, {r0-r3, lr}
	ldr	r1, [fp, #-4]
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE

	.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}

#else

ENTRY(__gnu_mcount_nc)
	stmdb	sp!, {r0-r3, lr}
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, ftrace_stub
	cmp	r0, r2
	bne	gnu_trace
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip

gnu_trace:
	ldr	r1, [sp, #20]			@ lr of instrumented routine
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE
	mov	lr, pc
	mov	pc, r2
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip

ENTRY(mcount)
	stmdb	sp!, {r0-r3, lr}
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, ftrace_stub
	cmp	r0, r2
	bne	trace
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}

trace:
	ldr	r1, [fp, #-4]			@ lr of instrumented routine
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE
	mov	lr, pc
	mov	pc, r2
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}

#endif /* CONFIG_DYNAMIC_FTRACE */

	.globl ftrace_stub
ftrace_stub:
	mov	pc, lr

#endif /* CONFIG_FUNCTION_TRACER */

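/*
 * For orientation (an illustrative sketch, not taken from the sources):
 * an EABI caller loads the syscall number into r7 and issues
 *
 *	mov	r7, #<nr>
 *	swi	#0
 *
 * whereas an old-ABI caller encodes the number in the swi instruction
 * itself:
 *
 *	swi	#(__NR_SYSCALL_BASE + <nr>)
 *
 * so vector_swi below may have to load the instruction word from
 * [lr, #-4] to tell the two apart: e.g. 0xef900005 (swi 0x900005)
 * masks and eors down to syscall number 5, while swi 0 yields zero,
 * identifying an EABI call.
 */
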
/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	/* If we're optimising for StrongARM the resulting code won't
	   run on an ARM7 and we can save a couple of instructions.
								--pb */
#ifdef CONFIG_CPU_ARM710
#define A710(code...) code
.Larm710bug:
	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE
	subs	pc, lr, #4
#else
#define A710(code...)
#endif

	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * instruction to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
	ldreq	r10, [lr, #-4]			@ get SWI instruction
#else
	ldr	r10, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, r10, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
  A710(	ldr	ip, [lr, #-4]			@ get SWI instruction	)
  A710(	and	ip, ip, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)

#elif defined(CONFIG_ARM_THUMB)

	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]

#else

	/* Legacy ABI only. */
	ldr	scno, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, scno, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)

#endif

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq

	get_thread_info tsk
	adr	tbl, sys_call_table		@ load syscall table pointer
	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, BSYM(ret_fast_syscall)	@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func
ENDPROC(vector_swi)

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r2, scno
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	bl	syscall_trace

	adr	lr, BSYM(__sys_trace_return)	@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r2, scno
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

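/*
 * For reference: each entry in calls.S is written as CALL(x), where x
 * may be wrapped in ABI(native, compat) or OBSOLETE(syscall); the macro
 * definitions around each #include of calls.S below therefore pick
 * which handler lands in each table slot.
 */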
/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
	stmloia	sp, {r5, r6}			@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_fork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_fork
ENDPROC(sys_fork_wrapper)

sys_vfork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_vfork
ENDPROC(sys_vfork_wrapper)

sys_execve_wrapper:
	add	r3, sp, #S_OFF
	b	sys_execve
ENDPROC(sys_execve_wrapper)

sys_clone_wrapper:
	add	ip, sp, #S_OFF
	str	ip, [sp, #4]
	b	sys_clone
ENDPROC(sys_clone_wrapper)

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_sigaltstack_wrapper:
	ldr	r2, [sp, #S_OFF + S_SP]
	b	do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)

sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the
 * requested offset, we return -EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
	tst	r5, #PGOFF_MASK
	moveq	r5, r5, lsr #PAGE_SHIFT - 12
	streq	r5, [sp, #4]
	beq	sys_mmap_pgoff
	mov	r0, #-EINVAL
	mov	pc, lr
#else
	str	r5, [sp, #4]
	b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

sys_oabi_pread64:
	stmia	sp, {r3, r4}
	b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]
	mov	r3, r2
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif
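
/*
 * A closing note on the compat wrappers above, assuming the usual ABI
 * rules rather than anything stated in this file: EABI passes 64-bit
 * arguments in even/odd register pairs (or doubleword-aligned stack
 * slots), while the old ABI packs them into whatever registers come
 * next.  For truncate64(path, length) an old-ABI caller thus has
 * length in r1/r2, so sys_oabi_truncate64 shifts it into r2/r3 where
 * the EABI sys_truncate64 expects it, and sys_oabi_pread64 stores the
 * offset from r3/r4 into the stacked fifth/sixth argument slots.
 */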