entry-armv.S (c0e7f7ee717e2b4c5791e7422424c96b5008c39e) entry-armv.S (195b58add463f697fb802ed55e26759094d40a54)
1/*
2 * linux/arch/arm/kernel/entry-armv.S
3 *
4 * Copyright (C) 1996,1997,1998 Russell King.
5 * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
6 * nommu support by Hyok S. Choi (hyok.choi@samsung.com)
7 *
8 * This program is free software; you can redistribute it and/or modify

--- 132 unchanged lines hidden (view full) ---

141 */
142
143#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
144#define SPFIX(code...) code
145#else
146#define SPFIX(code...)
147#endif
148
1/*
2 * linux/arch/arm/kernel/entry-armv.S
3 *
4 * Copyright (C) 1996,1997,1998 Russell King.
5 * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
6 * nommu support by Hyok S. Choi (hyok.choi@samsung.com)
7 *
8 * This program is free software; you can redistribute it and/or modify

--- 132 unchanged lines hidden (view full) ---

141 */
142
143#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
144#define SPFIX(code...) code
145#else
146#define SPFIX(code...)
147#endif
148
149 .macro svc_entry, stack_hole=0, trace=1
149 .macro svc_entry, stack_hole=0
150 UNWIND(.fnstart )
151 UNWIND(.save {r0 - pc} )
152 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
153#ifdef CONFIG_THUMB2_KERNEL
154 SPFIX( str r0, [sp] ) @ temporarily saved
155 SPFIX( mov r0, sp )
156 SPFIX( tst r0, #4 ) @ test original stack alignment
157 SPFIX( ldr r0, [sp] ) @ restored

--- 19 unchanged lines hidden (view full) ---

177 @ r2 - sp_svc
178 @ r3 - lr_svc
179 @ r4 - lr_<exception>, already fixed up for correct return/restart
180 @ r5 - spsr_<exception>
181 @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
182 @
183 stmia r7, {r2 - r6}
184
150 UNWIND(.fnstart )
151 UNWIND(.save {r0 - pc} )
152 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
153#ifdef CONFIG_THUMB2_KERNEL
154 SPFIX( str r0, [sp] ) @ temporarily saved
155 SPFIX( mov r0, sp )
156 SPFIX( tst r0, #4 ) @ test original stack alignment
157 SPFIX( ldr r0, [sp] ) @ restored

--- 19 unchanged lines hidden (view full) ---

177 @ r2 - sp_svc
178 @ r3 - lr_svc
179 @ r4 - lr_<exception>, already fixed up for correct return/restart
180 @ r5 - spsr_<exception>
181 @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
182 @
183 stmia r7, {r2 - r6}
184
185 .if \trace
186#ifdef CONFIG_TRACE_IRQFLAGS
187 bl trace_hardirqs_off
188#endif
185#ifdef CONFIG_TRACE_IRQFLAGS
186 bl trace_hardirqs_off
187#endif
189 .endif
190 .endm
191
192 .align 5
193__dabt_svc:
194 svc_entry
195 mov r2, sp
196 dabt_helper
197 THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR

--- 94 unchanged lines hidden (view full) ---

292 svc_entry
293 mov r2, sp @ regs
294 pabt_helper
295 svc_exit r5 @ return from exception
296 UNWIND(.fnend )
297ENDPROC(__pabt_svc)
298
299 .align 5
188 .endm
189
190 .align 5
191__dabt_svc:
192 svc_entry
193 mov r2, sp
194 dabt_helper
195 THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR

--- 94 unchanged lines hidden (view full) ---

290 svc_entry
291 mov r2, sp @ regs
292 pabt_helper
293 svc_exit r5 @ return from exception
294 UNWIND(.fnend )
295ENDPROC(__pabt_svc)
296
297 .align 5
@ FIQ taken while the CPU was in SVC (or another non-user, non-abort)
@ mode.  The FIQ is handled NMI-style on the SVC stack; trace=0 skips the
@ trace_hardirqs_off call done by the normal svc_entry path (see the
@ .if \trace block in svc_entry), since IRQ-flag tracing is not done in
@ this NMI-like context.
300__fiq_svc:
301 svc_entry trace=0
302 mov r0, sp @ struct pt_regs *regs
303 bl handle_fiq_as_nmi @ C-level NMI-style FIQ handler
304 svc_exit_via_fiq @ FIQ-specific exception return
305 UNWIND(.fnend )
306ENDPROC(__fiq_svc)
307
308 .align 5
309.LCcralign:
310 .word cr_alignment
311#ifdef MULTI_DABORT
312.LCprocfns:
313 .word processor
314#endif
315.LCfp:
316 .word fp_enter
317
318/*
298.LCcralign:
299 .word cr_alignment
300#ifdef MULTI_DABORT
301.LCprocfns:
302 .word processor
303#endif
304.LCfp:
305 .word fp_enter
306
307/*
319 * Abort mode handlers
320 */
321
322@
323@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
324@ and reuses the same macros. However in abort mode we must also
325@ save/restore lr_abt and spsr_abt to make nested aborts safe.
326@
327 .align 5
@ FIQ taken while in abort mode.  As the preceding comment block notes,
@ this reuses the SVC-mode FIQ machinery, but must additionally preserve
@ the abort-mode banked registers (lr_abt, spsr_abt) so that a data abort
@ occurring inside the FIQ handler does not destroy the interrupted
@ abort's state ("make nested aborts safe").
328__fiq_abt:
329 svc_entry trace=0
330
@ Briefly switch to ABT mode (IRQ+FIQ masked) just to read the banked
@ lr_abt/spsr_abt.  Thumb2 cannot use an immediate with msr, hence the
@ ARM()/THUMB() split using r0 as scratch.
331 ARM( msr cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
332 THUMB( mov r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
333 THUMB( msr cpsr_c, r0 )
334 mov r1, lr @ Save lr_abt
335 mrs r2, spsr @ Save spsr_abt, abort is now safe
336 ARM( msr cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
337 THUMB( mov r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
338 THUMB( msr cpsr_c, r0 )
339 stmfd sp!, {r1 - r2} @ stash lr_abt/spsr_abt on the SVC stack
340
@ sp currently points at the two words pushed above; the pt_regs frame
@ built by svc_entry starts 8 bytes up.
341 add r0, sp, #8 @ struct pt_regs *regs
342 bl handle_fiq_as_nmi
343
@ Mirror image of the entry sequence: pop the saved values and write
@ them back into the abort-mode banked registers before returning.
344 ldmfd sp!, {r1 - r2}
345 ARM( msr cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
346 THUMB( mov r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
347 THUMB( msr cpsr_c, r0 )
348 mov lr, r1 @ Restore lr_abt, abort is unsafe
349 msr spsr_cxsf, r2 @ Restore spsr_abt
350 ARM( msr cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
351 THUMB( mov r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
352 THUMB( msr cpsr_c, r0 )
353
354 svc_exit_via_fiq
355 UNWIND(.fnend )
356ENDPROC(__fiq_abt)
357
358/*
359 * User mode handlers
360 *
361 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
362 */
363
364#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
365#error "sizeof(struct pt_regs) must be a multiple of 8"
366#endif
367
308 * User mode handlers
309 *
310 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
311 */
312
313#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
314#error "sizeof(struct pt_regs) must be a multiple of 8"
315#endif
316
368 .macro usr_entry, trace=1
317 .macro usr_entry
369 UNWIND(.fnstart )
370 UNWIND(.cantunwind ) @ don't unwind the user space
371 sub sp, sp, #S_FRAME_SIZE
372 ARM( stmib sp, {r1 - r12} )
373 THUMB( stmia sp, {r0 - r12} )
374
318 UNWIND(.fnstart )
319 UNWIND(.cantunwind ) @ don't unwind the user space
320 sub sp, sp, #S_FRAME_SIZE
321 ARM( stmib sp, {r1 - r12} )
322 THUMB( stmia sp, {r0 - r12} )
323
324 ATRAP( mrc p15, 0, r7, c1, c0, 0)
325 ATRAP( ldr r8, .LCcralign)
326
375 ldmia r0, {r3 - r5}
376 add r0, sp, #S_PC @ here for interlock avoidance
377 mov r6, #-1 @ "" "" "" ""
378
379 str r3, [sp] @ save the "real" r0 copied
380 @ from the exception stack
381
327 ldmia r0, {r3 - r5}
328 add r0, sp, #S_PC @ here for interlock avoidance
329 mov r6, #-1 @ "" "" "" ""
330
331 str r3, [sp] @ save the "real" r0 copied
332 @ from the exception stack
333
334 ATRAP( ldr r8, [r8, #0])
335
382 @
383 @ We are now ready to fill in the remaining blanks on the stack:
384 @
385 @ r4 - lr_<exception>, already fixed up for correct return/restart
386 @ r5 - spsr_<exception>
387 @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
388 @
389 @ Also, separately save sp_usr and lr_usr
390 @
391 stmia r0, {r4 - r6}
392 ARM( stmdb r0, {sp, lr}^ )
393 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
394
336 @
337 @ We are now ready to fill in the remaining blanks on the stack:
338 @
339 @ r4 - lr_<exception>, already fixed up for correct return/restart
340 @ r5 - spsr_<exception>
341 @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
342 @
343 @ Also, separately save sp_usr and lr_usr
344 @
345 stmia r0, {r4 - r6}
346 ARM( stmdb r0, {sp, lr}^ )
347 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
348
395 @
396 @ Enable the alignment trap while in kernel mode
349 @ Enable the alignment trap while in kernel mode
397 @
398 alignment_trap r0, .LCcralign
350 ATRAP( teq r8, r7)
351 ATRAP( mcrne p15, 0, r8, c1, c0, 0)
399
400 @
401 @ Clear FP to mark the first stack frame
402 @
403 zero_fp
404
352
353 @
354 @ Clear FP to mark the first stack frame
355 @
356 zero_fp
357
405 .if \trace
406#ifdef CONFIG_IRQSOFF_TRACER
407 bl trace_hardirqs_off
408#endif
409 ct_user_exit save = 0
358#ifdef CONFIG_IRQSOFF_TRACER
359 bl trace_hardirqs_off
360#endif
361 ct_user_exit save = 0
410 .endif
411 .endm
412
413 .macro kuser_cmpxchg_check
414#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
415 !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
416#ifndef CONFIG_MMU
417#warning "NPTL on non MMU needs fixing"
418#else

--- 312 unchanged lines hidden (view full) ---

731 UNWIND(.cantunwind )
732 get_thread_info tsk
733 mov why, #0
734 b ret_to_user
735 UNWIND(.fnend )
736ENDPROC(__pabt_usr)
737ENDPROC(ret_from_exception)
738
362 .endm
363
364 .macro kuser_cmpxchg_check
365#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
366 !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
367#ifndef CONFIG_MMU
368#warning "NPTL on non MMU needs fixing"
369#else

--- 312 unchanged lines hidden (view full) ---

682 UNWIND(.cantunwind )
683 get_thread_info tsk
684 mov why, #0
685 b ret_to_user
686 UNWIND(.fnend )
687ENDPROC(__pabt_usr)
688ENDPROC(ret_from_exception)
689
739 .align 5
@ FIQ taken from user mode, handled NMI-style on the kernel stack.
@ trace=0: usr_entry skips its trace_hardirqs_off/ct_user_exit sequence
@ in this context (see the .if \trace block in usr_entry).
740__fiq_usr:
741 usr_entry trace=0
742 kuser_cmpxchg_check @ fix up interrupted kuser cmpxchg (pre-v6K)
743 mov r0, sp @ struct pt_regs *regs
744 bl handle_fiq_as_nmi
745 get_thread_info tsk
746 restore_user_regs fast = 0, offset = 0 @ full register restore to user mode
747 UNWIND(.fnend )
748ENDPROC(__fiq_usr)
749
750/*
751 * Register switch for ARMv3 and ARMv4 processors
752 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
753 * previous and next are guaranteed not to be the same.
754 */
755ENTRY(__switch_to)
756 UNWIND(.fnstart )
757 UNWIND(.cantunwind )

--- 419 unchanged lines hidden (view full) ---

1177 * These aren't too critical.
1178 * (they're not supposed to happen, and won't happen in 32-bit data mode).
1179 */
1180
1181vector_addrexcptn:
1182 b vector_addrexcptn
1183
1184/*=============================================================================
690/*
691 * Register switch for ARMv3 and ARMv4 processors
692 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
693 * previous and next are guaranteed not to be the same.
694 */
695ENTRY(__switch_to)
696 UNWIND(.fnstart )
697 UNWIND(.cantunwind )

--- 419 unchanged lines hidden (view full) ---

1117 * These aren't too critical.
1118 * (they're not supposed to happen, and won't happen in 32-bit data mode).
1119 */
1120
1121vector_addrexcptn:
1122 b vector_addrexcptn
1123
1124/*=============================================================================
1185 * FIQ "NMI" handler
1125 * Undefined FIQs
1186 *-----------------------------------------------------------------------------
1126 *-----------------------------------------------------------------------------
1187 * Handle a FIQ using the SVC stack, allowing the FIQ to act like the
1188 * NMI does on x86 systems.
1127 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
1128 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
1129 * Basically to switch modes, we *HAVE* to clobber one register... brain
1130 * damage alert! I don't think that we can execute any code in here in any
1131 * other mode than FIQ... Ok you can switch to another mode, but you can't
1132 * get out of that mode without clobbering one register.
1189 */
1133 */
1190 vector_stub fiq, FIQ_MODE, 4
1134vector_fiq:
1135 subs pc, lr, #4
1191
1136
@ Per-mode FIQ dispatch table, one entry per value of the interrupted
@ CPSR mode field (presumably indexed by vector_stub, whose body is not
@ visible here — confirm against the full file).  A FIQ from user mode
@ goes to __fiq_usr, from abort mode to __fiq_abt (which must preserve
@ the abort-mode banked state); every other mode is handled on the SVC
@ stack by __fiq_svc.
1192 .long __fiq_usr @ 0 (USR_26 / USR_32)
1193 .long __fiq_svc @ 1 (FIQ_26 / FIQ_32)
1194 .long __fiq_svc @ 2 (IRQ_26 / IRQ_32)
1195 .long __fiq_svc @ 3 (SVC_26 / SVC_32)
1196 .long __fiq_svc @ 4
1197 .long __fiq_svc @ 5
1198 .long __fiq_svc @ 6
1199 .long __fiq_abt @ 7
1200 .long __fiq_svc @ 8
1201 .long __fiq_svc @ 9
1202 .long __fiq_svc @ a
1203 .long __fiq_svc @ b
1204 .long __fiq_svc @ c
1205 .long __fiq_svc @ d
1206 .long __fiq_svc @ e
1207 .long __fiq_svc @ f
1208
1209 .globl vector_fiq_offset
1210 .equ vector_fiq_offset, vector_fiq
1211
1212 .section .vectors, "ax", %progbits
1213__vectors_start:
1214 W(b) vector_rst
1215 W(b) vector_und
1216 W(ldr) pc, __vectors_start + 0x1000

--- 17 unchanged lines hidden ---
1137 .globl vector_fiq_offset
1138 .equ vector_fiq_offset, vector_fiq
1139
1140 .section .vectors, "ax", %progbits
1141__vectors_start:
1142 W(b) vector_rst
1143 W(b) vector_und
1144 W(ldr) pc, __vectors_start + 0x1000

--- 17 unchanged lines hidden ---