/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/assembler.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#ifdef CONFIG_AEABI
#include <asm/unistd-oabi.h>
#endif

        .equ    NR_syscalls, __NR_syscalls

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
        .macro  arch_ret_to_user, tmp1, tmp2
        .endm
#endif

#include "entry-header.S"


        .align  5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * such as avoiding writing r0 to the stack.  We only use this path if we
 * have tracing and context tracking disabled - the overheads from those
 * features make this path too inefficient.
 */
ret_fast_syscall:
 UNWIND(.fnstart        )
 UNWIND(.cantunwind     )
        disable_irq_notrace                     @ disable interrupts
        ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
        tst     r1, #_TIF_SYSCALL_WORK
        bne     fast_work_pending
        tst     r1, #_TIF_WORK_MASK
        bne     fast_work_pending

        /* perform architecture specific actions before user return */
        arch_ret_to_user r1, lr

        restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend          )
ENDPROC(ret_fast_syscall)

        /* Ok, we need to do extra processing, enter the slow path. */
fast_work_pending:
        str     r0, [sp, #S_R0+S_OFF]!          @ returned r0
        /* fall through to work_pending */
#else
/*
 * The "replacement" ret_fast_syscall for when tracing or context tracking
 * is enabled.  As we will need to call out to some C functions, we save
 * r0 first to avoid needing to save registers around each C function call.
 */
ret_fast_syscall:
 UNWIND(.fnstart        )
 UNWIND(.cantunwind     )
        str     r0, [sp, #S_R0 + S_OFF]!        @ save returned r0
        disable_irq_notrace                     @ disable interrupts
        ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
        tst     r1, #_TIF_SYSCALL_WORK
        bne     fast_work_pending
        tst     r1, #_TIF_WORK_MASK
        beq     no_work_pending
 UNWIND(.fnend          )
ENDPROC(ret_fast_syscall)

        /* Slower path - fall through to work_pending */
fast_work_pending:
#endif

        tst     r1, #_TIF_SYSCALL_WORK
        bne     __sys_trace_return_nosave
slow_work_pending:
        mov     r0, sp                          @ 'regs'
        mov     r2, why                         @ 'syscall'
        bl      do_work_pending
        cmp     r0, #0
        beq     no_work_pending
        movlt   scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
        ldmia   sp, {r0 - r6}                   @ have to reload r0 - r6
        b       local_restart                   @ ... and off we go
ENDPROC(ret_fast_syscall)
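
/*
 * Overview of the return paths above (summary only; the code is
 * authoritative): syscall handlers return to ret_fast_syscall, as their
 * lr is set to it at dispatch time in vector_swi below.  If no work
 * flags are set, the user registers are restored directly.  Otherwise
 * we fall through to fast_work_pending: pending syscall-exit tracing is
 * handled via __sys_trace_return_nosave, and any other work goes to
 * slow_work_pending, which calls do_work_pending().  A non-zero return
 * from do_work_pending() means the syscall must be restarted, so
 * r0 - r6 are reloaded and we branch back to local_restart (a negative
 * return substitutes __NR_restart_syscall).
 */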

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 * IRQs may be enabled here, so always disable them.  Note that we use the
 * "notrace" version to avoid calling into the tracing code unnecessarily.
 * do_work_pending() will update this state if necessary.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
        disable_irq_notrace                     @ disable interrupts
ENTRY(ret_to_user_from_irq)
        ldr     r1, [tsk, #TI_FLAGS]
        tst     r1, #_TIF_WORK_MASK
        bne     slow_work_pending
no_work_pending:
        asm_trace_hardirqs_on save = 0

        /* perform architecture specific actions before user return */
        arch_ret_to_user r1, lr
        ct_user_enter save = 0

        restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
        bl      schedule_tail
        cmp     r5, #0
        movne   r0, r4
        badrne  lr, 1f
        retne   r5
1:      get_thread_info tsk
        b       ret_slow_syscall
ENDPROC(ret_from_fork)

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

        .align  5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
        v7m_exception_entry
#else
        sub     sp, sp, #PT_REGS_SIZE
        stmia   sp, {r0 - r12}                  @ Calling r0 - r12
 ARM(   add     r8, sp, #S_PC           )
 ARM(   stmdb   r8, {sp, lr}^           )       @ Calling sp, lr
 THUMB( mov     r8, sp                  )
 THUMB( store_user_sp_lr r8, r10, S_SP  )       @ calling sp, lr
        mrs     r8, spsr                        @ called from non-FIQ mode, so ok.
        str     lr, [sp, #S_PC]                 @ Save calling PC
        str     r8, [sp, #S_PSR]                @ Save CPSR
        str     r0, [sp, #S_OLD_R0]             @ Save OLD_R0
#endif
        zero_fp
        alignment_trap r10, ip, __cr_alignment
        enable_irq
        ct_user_exit
        get_thread_info tsk

        /*
         * Get the system call number.
         */

#if defined(CONFIG_OABI_COMPAT)

        /*
         * If we have CONFIG_OABI_COMPAT then we need to look at the swi
         * value to determine if it is an EABI or an old ABI call (see
         * the illustrative encoding sketch below).
         */
#ifdef CONFIG_ARM_THUMB
        tst     r8, #PSR_T_BIT
        movne   r10, #0                         @ no thumb OABI emulation
 USER(  ldreq   r10, [lr, #-4]          )       @ get SWI instruction
#else
 USER(  ldr     r10, [lr, #-4]          )       @ get SWI instruction
#endif
 ARM_BE8(rev    r10, r10)                       @ little endian instruction

#elif defined(CONFIG_AEABI)

        /*
         * Pure EABI user space always puts the syscall number into scno (r7).
         */
#elif defined(CONFIG_ARM_THUMB)
        /* Legacy ABI only, possibly thumb mode. */
        tst     r8, #PSR_T_BIT                  @ this is SPSR from save_user_regs
        addne   scno, r7, #__NR_SYSCALL_BASE    @ put OS number in
 USER(  ldreq   scno, [lr, #-4]         )

#else
        /* Legacy ABI only. */
 USER(  ldr     scno, [lr, #-4]         )       @ get SWI instruction
#endif

        uaccess_disable tbl

        adr     tbl, sys_call_table             @ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
        /*
         * If the swi argument is zero, this is an EABI call and we do nothing.
         *
         * If this is an old ABI call, get the syscall number into scno and
         * get the old ABI syscall table address.
         */
        bics    r10, r10, #0xff000000
        eorne   scno, r10, #__NR_OABI_SYSCALL_BASE
        ldrne   tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
        bic     scno, scno, #0xff000000         @ mask off SWI op-code
        eor     scno, scno, #__NR_SYSCALL_BASE  @ check OS number
#endif
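
/*
 * Illustrative sketch only (not assembled): how user space reaches this
 * point under the two ABIs.  An EABI caller passes the syscall number
 * in r7 and uses a zero SWI immediate, roughly:
 *
 *      mov     r7, #__NR_getpid        @ __NR_getpid used purely as an example
 *      swi     0
 *
 * whereas an old-ABI caller encodes the number in the SWI immediate
 * itself (__NR_OABI_SYSCALL_BASE + number, i.e. 0x900000 + number),
 * which is why the SWI instruction has to be fetched and decoded above
 * when CONFIG_OABI_COMPAT is enabled.
 */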

local_restart:
        ldr     r10, [tsk, #TI_FLAGS]           @ check for syscall tracing
        stmdb   sp!, {r4, r5}                   @ push fifth and sixth args

        tst     r10, #_TIF_SYSCALL_WORK         @ are we tracing syscalls?
        bne     __sys_trace

        cmp     scno, #NR_syscalls              @ check upper syscall limit
        badr    lr, ret_fast_syscall            @ return address
        ldrcc   pc, [tbl, scno, lsl #2]         @ call sys_* routine

        add     r1, sp, #S_OFF
2:      cmp     scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
        eor     r0, scno, #__NR_SYSCALL_BASE    @ put OS number back
        bcs     arm_syscall
        mov     why, #0                         @ no longer a real syscall
        b       sys_ni_syscall                  @ not private func

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
        /*
         * We failed to handle a fault trying to access the page
         * containing the swi instruction, but we're not really in a
         * position to return -EFAULT.  Instead, return back to the
         * instruction and re-enter the user fault handling path trying
         * to page it in.  This will likely result in sending SEGV to the
         * current task.
         */
9001:
        sub     lr, lr, #4
        str     lr, [sp, #S_PC]
        b       ret_fast_syscall
#endif
ENDPROC(vector_swi)

        /*
         * This is the really slow path.  We're going to be doing
         * context switches, and waiting for our parent to respond.
         */
__sys_trace:
        mov     r1, scno
        add     r0, sp, #S_OFF
        bl      syscall_trace_enter

        badr    lr, __sys_trace_return          @ return address
        mov     scno, r0                        @ syscall number (possibly new)
        add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
        cmp     scno, #NR_syscalls              @ check upper syscall limit
        ldmccia r1, {r0 - r6}                   @ have to reload r0 - r6
        stmccia sp, {r4, r5}                    @ and update the stack args
        ldrcc   pc, [tbl, scno, lsl #2]         @ call sys_* routine
        cmp     scno, #-1                       @ skip the syscall?
        bne     2b
        add     sp, sp, #S_OFF                  @ restore stack
        b       ret_slow_syscall

__sys_trace_return:
        str     r0, [sp, #S_R0 + S_OFF]!        @ save returned r0
        mov     r0, sp
        bl      syscall_trace_exit
        b       ret_slow_syscall

__sys_trace_return_nosave:
        enable_irq_notrace
        mov     r0, sp
        bl      syscall_trace_exit
        b       ret_slow_syscall

        .align  5
#ifdef CONFIG_ALIGNMENT_TRAP
        .type   __cr_alignment, #object
__cr_alignment:
        .word   cr_alignment
#endif
        .ltorg

        .macro  syscall_table_start, sym
        .equ    __sys_nr, 0
        .type   \sym, #object
ENTRY(\sym)
        .endm

        .macro  syscall, nr, func
        .ifgt   __sys_nr - \nr
        .error  "Duplicated/unordered system call entry"
        .endif
        .rept   \nr - __sys_nr
        .long   sys_ni_syscall
        .endr
        .long   \func
        .equ    __sys_nr, \nr + 1
        .endm

        .macro  syscall_table_end, sym
        .ifgt   __sys_nr - __NR_syscalls
        .error  "System call table too big"
        .endif
        .rept   __NR_syscalls - __sys_nr
        .long   sys_ni_syscall
        .endr
        .size   \sym, . - \sym
        .endm

#define NATIVE(nr, func) syscall nr, func
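
/*
 * Illustrative expansion of the macros above (not assembled here;
 * example_table, sys_foo and sys_bar are hypothetical names):
 *
 *      syscall_table_start example_table
 *      syscall 0, sys_foo
 *      syscall 3, sys_bar
 *      syscall_table_end example_table
 *
 * emits .long sys_foo, two sys_ni_syscall words for the skipped slots
 * 1 and 2, then .long sys_bar, and finally pads the rest of the table
 * up to __NR_syscalls entries with sys_ni_syscall.
 */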

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
        syscall_table_start sys_call_table
#define COMPAT(nr, native, compat) syscall nr, native
#ifdef CONFIG_AEABI
#include <calls-eabi.S>
#else
#include <calls-oabi.S>
#endif
#undef COMPAT
        syscall_table_end sys_call_table

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
        bic     scno, r0, #__NR_OABI_SYSCALL_BASE
        cmp     scno, #__NR_syscall - __NR_SYSCALL_BASE
        cmpne   scno, #NR_syscalls              @ check range
        stmloia sp, {r5, r6}                    @ shuffle args
        movlo   r0, r1
        movlo   r1, r2
        movlo   r2, r3
        movlo   r3, r4
        ldrlo   pc, [tbl, scno, lsl #2]
        b       sys_ni_syscall
ENDPROC(sys_syscall)

sys_sigreturn_wrapper:
        add     r0, sp, #S_OFF
        mov     why, #0                         @ prevent syscall restart handling
        b       sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
        add     r0, sp, #S_OFF
        mov     why, #0                         @ prevent syscall restart handling
        b       sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_statfs64_wrapper:
        teq     r1, #88
        moveq   r1, #84
        b       sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
        teq     r1, #88
        moveq   r1, #84
        b       sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
        tst     r5, #PGOFF_MASK
        moveq   r5, r5, lsr #PAGE_SHIFT - 12
        streq   r5, [sp, #4]
        beq     sys_mmap_pgoff
        mov     r0, #-EINVAL
        ret     lr
#else
        str     r5, [sp, #4]
        b       sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences: EABI places
 * 64-bit arguments in even/odd register pairs (and 64-bit aligned stack
 * slots), while the old ABI packs them into whatever registers come
 * next, so the arguments are shuffled here before branching to the
 * native implementations.
 */

sys_oabi_pread64:
        stmia   sp, {r3, r4}
        b       sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
        stmia   sp, {r3, r4}
        b       sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
        mov     r3, r2
        mov     r2, r1
        b       sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
        mov     r3, r2
        mov     r2, r1
        b       sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
        str     r3, [sp]
        mov     r3, r2
        mov     r2, r1
        b       sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
        syscall_table_start sys_oabi_call_table
#define COMPAT(nr, native, compat) syscall nr, compat
#include <calls-oabi.S>
        syscall_table_end sys_oabi_call_table

#endif