#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/v7m.h>

@ Bad Abort numbers
@ -----------------
@
#define BAD_PREFETCH	0
#define BAD_DATA	1
#define BAD_ADDREXCPTN	2
#define BAD_IRQ		3
#define BAD_UNDEFINSTR	4

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
#define S_OFF		8

/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif

	.macro	zero_fp
#ifdef CONFIG_FRAME_POINTER
	mov	fp, #0
#endif
	.endm

#ifdef CONFIG_ALIGNMENT_TRAP
#define ATRAP(x...) x
#else
#define ATRAP(x...)
#endif

	.macro	alignment_trap, rtmp1, rtmp2, label
#ifdef CONFIG_ALIGNMENT_TRAP
	mrc	p15, 0, \rtmp2, c1, c0, 0
	ldr	\rtmp1, \label
	ldr	\rtmp1, [\rtmp1]
	teq	\rtmp1, \rtmp2
	mcrne	p15, 0, \rtmp1, c1, c0, 0
#endif
	.endm

#ifdef CONFIG_CPU_V7M
/*
 * ARMv7-M exception entry/exit macros.
 *
 * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
 * automatically saved on the current stack (32 bytes) before
 * switching to the exception stack (SP_main).
 *
 * If the exception is taken while in user mode, SP_main is
 * empty. Otherwise, SP_main is automatically aligned to 64 bits
 * (CCR.STKALIGN set).
 *
 * Linux assumes that interrupts are disabled when entering an
 * exception handler and it may BUG if this is not the case. Interrupts
 * are disabled during entry and re-enabled in the exit macro.
 *
 * v7m_exception_slow_exit is used when returning from SVC or PendSV.
 * When returning to kernel mode, we don't return from exception.
 */
	.macro	v7m_exception_entry
	@ Determine the location of the registers saved by the core during
	@ exception entry. Depending on the mode the CPU was in when the
	@ exception happened, that is either on the main or the process stack.
	@ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
	@ was used.
	tst	lr, #EXC_RET_STACK_MASK
	mrsne	r12, psp
	moveq	r12, sp

	@ We cannot rely on r0-r3 and r12 matching the value saved in the
	@ exception frame because of tail-chaining. So these have to be
	@ reloaded.
	ldmia	r12!, {r0-r3}

	@ Linux expects to have irqs off. Do it here before taking stack space
	cpsid	i

	sub	sp, #S_FRAME_SIZE-S_IP
	stmdb	sp!, {r0-r11}

	@ Load saved r12, lr, return address and xPSR.
	@ r0-r7 are used for signals and never touched from now on. Clobbering
	@ r8-r12 is OK.
	mov	r9, r12
	ldmia	r9!, {r8, r10-r12}

	@ Calculate the original stack pointer value.
	@ r9 currently points to the memory location just above the auto-saved
	@ xPSR.
	@ The CPU might automatically 8-byte align the stack. Bit 9
	@ of the saved xPSR specifies if stack aligning took place. In this
	@ case another 32-bit value is included in the stack.

	tst	r12, V7M_xPSR_FRAMEPTRALIGN
	addne	r9, r9, #4

	@ Store saved r12 using str to have a register to hold the base for stm
	str	r8, [sp, #S_IP]
	add	r8, sp, #S_SP
	@ store r13-r15, xPSR
	stmia	r8!, {r9-r12}
	@ store old_r0
	str	r0, [r8]
	.endm
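
	@ A sketch of the pt_regs frame that v7m_exception_entry leaves on
	@ the main stack (offsets are the S_* constants generated into
	@ asm-offsets.h; listed here for illustration only):
	@
	@	sp + S_R0..S_R11	r0-r11 (r0-r3 and r12 reloaded from
	@				the hardware-saved exception frame)
	@	sp + S_IP		saved r12
	@	sp + S_SP		original sp of the interrupted context
	@	sp + S_LR		saved lr
	@	sp + S_PC		return address
	@	sp + S_PSR		xPSR
	@	sp + S_OLD_R0		original r0, kept for syscall restart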
	/*
	 * PENDSV and SVCALL are configured to have the same exception
	 * priorities. Since a kernel thread runs at SVCALL execution
	 * priority, it can never be preempted, so we will never have to
	 * return to a kernel thread here.
	 */
	.macro	v7m_exception_slow_exit ret_r0
	cpsid	i
	ldr	lr, =EXC_RET_THREADMODE_PROCESSSTACK

	@ read original r12, sp, lr, pc and xPSR
	add	r12, sp, #S_IP
	ldmia	r12, {r1-r5}

	@ An exception frame is always 8-byte aligned. To tell the hardware
	@ whether the sp to be restored is aligned or not, set bit 9 of the
	@ saved xPSR accordingly.
	tst	r2, #4
	subne	r2, r2, #4
	orrne	r5, V7M_xPSR_FRAMEPTRALIGN
	biceq	r5, V7M_xPSR_FRAMEPTRALIGN

	@ Ensure bit 0 is cleared in the PC, otherwise behaviour is
	@ unpredictable
	bic	r4, #1

	@ write basic exception frame
	stmdb	r2!, {r1, r3-r5}
	ldmia	sp, {r1, r3-r5}
	.if	\ret_r0
	stmdb	r2!, {r0, r3-r5}
	.else
	stmdb	r2!, {r1, r3-r5}
	.endif

	@ restore process sp
	msr	psp, r2

	@ restore original r4-r11
	ldmia	sp!, {r0-r11}

	@ restore main sp
	add	sp, sp, #S_FRAME_SIZE-S_IP

	cpsie	i
	bx	lr
	.endm
#endif	/* CONFIG_CPU_V7M */

	@
	@ Store/load the USER SP and LR registers by switching to the SYS
	@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
	@ available. Should only be called from SVC mode.
	@
	.macro	store_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	str	sp, [\rd, #\offset]		@ save sp_usr
	str	lr, [\rd, #\offset + 4]		@ save lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm

	.macro	load_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	ldr	sp, [\rd, #\offset]		@ load sp_usr
	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm


	.macro	svc_exit, rpsr, irq = 0
	.if	\irq != 0
	@ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	.else
	@ IRQs off again before pulling preserved data off the stack
	disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	\rpsr, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	\rpsr, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	.endif
	uaccess_restore

#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode SVC restore
	msr	spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
	@ We must avoid clrex due to Cortex-A15 erratum #830321
	sub	r0, sp, #4			@ uninhabited address
	strex	r1, r2, [r0]			@ clear the exclusive monitor
#endif
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#else
	@ Thumb mode SVC restore
	ldr	lr, [sp, #S_SP]			@ top of the stack
	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc

	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor

	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
	ldmia	sp, {r0 - r12}
	mov	sp, lr
	ldr	lr, [sp], #4
	rfeia	sp!
#endif
	.endm
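
	@ A typical call site looks roughly like this (a sketch; the real
	@ users of svc_exit live in entry-armv.S):
	@
	@	ldr	r5, [sp, #S_PSR]	@ SPSR of the interrupted context
	@	svc_exit r5			@ return, restoring r0-pc and CPSR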
	@
	@ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
	@
	@ This macro acts in a similar manner to svc_exit but switches to FIQ
	@ mode to restore the final part of the register state.
	@
	@ We cannot use the normal svc_exit procedure because that would
	@ clobber spsr_svc (FIQ could be delivered during the first few
	@ instructions of vector_swi meaning its contents have not been
	@ saved anywhere).
	@
	@ Note that, unlike svc_exit, this macro also does not allow a
	@ caller-supplied rpsr. This is because the FIQ exceptions are not
	@ re-entrant and the handlers cannot call into the scheduler (meaning
	@ the value on the stack remains correct).
	@
	.macro	svc_exit_via_fiq
	uaccess_restore
#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode restore
	mov	r0, sp
	ldmib	r0, {r1 - r14}	@ abort is deadly from here onward (it will
				@ clobber state restored below)
	msr	cpsr_c, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
	add	r8, r0, #S_PC
	ldr	r9, [r0, #S_PSR]
	msr	spsr_cxsf, r9
	ldr	r0, [r0, #S_R0]
	ldmia	r8, {pc}^
#else
	@ Thumb mode restore
	add	r0, sp, #S_R2
	ldr	lr, [sp, #S_LR]
	ldr	sp, [sp, #S_SP]	@ abort is deadly from here onward (it will
				@ clobber state restored below)
	ldmia	r0, {r2 - r12}
	mov	r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
	msr	cpsr_c, r1
	sub	r0, #S_R2
	add	r8, r0, #S_PC
	ldmia	r0, {r0 - r1}
	rfeia	r8
#endif
	.endm


	.macro	restore_user_regs, fast = 0, offset = 0
	uaccess_enable r1, isb=0
#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode restore
	mov	r2, sp
	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
	msr	spsr_cxsf, r1			@ save in spsr_svc
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r1, r2, [r2]			@ clear the exclusive monitor
#endif
	.if	\fast
	ldmdb	r2, {r1 - lr}^			@ get calling r1 - lr
	.else
	ldmdb	r2, {r0 - lr}^			@ get calling r0 - lr
	.endif
	mov	r0, r0				@ ARMv5T and earlier require a nop
						@ after ldm {}^
	add	sp, sp, #\offset + S_FRAME_SIZE
	movs	pc, lr				@ return & move spsr_svc into cpsr
#elif defined(CONFIG_CPU_V7M)
	@ V7M restore.
	@ Note that we don't need to do clrex here as clearing the local
	@ monitor is part of the exception entry and exit sequence.
	.if	\offset
	add	sp, #\offset
	.endif
	v7m_exception_slow_exit ret_r0 = \fast
#else
	@ Thumb mode restore
	mov	r2, sp
	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]	@ get pc
	add	sp, sp, #\offset + S_SP
	msr	spsr_cxsf, r1			@ save in spsr_svc

	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r1, r2, [sp]			@ clear the exclusive monitor

	.if	\fast
	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
	.else
	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
	.endif
	add	sp, sp, #S_FRAME_SIZE - S_SP
	movs	pc, lr				@ return & move spsr_svc into cpsr
#endif	/* !CONFIG_THUMB2_KERNEL */
	.endm
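
	@ How restore_user_regs is invoked, roughly (a sketch; the real call
	@ sites are in entry-common.S): the fast syscall-return path keeps
	@ r0 as the syscall return value, while the slow path restores all
	@ of r0-lr from the saved pt_regs:
	@
	@	restore_user_regs fast = 1, offset = S_OFF	@ ret_fast_syscall
	@	restore_user_regs fast = 0, offset = 0		@ ret_to_user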
/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
	.if	\save
	stmdb	sp!, {r0-r3, ip, lr}
	bl	context_tracking_user_exit
	ldmia	sp!, {r0-r3, ip, lr}
	.else
	bl	context_tracking_user_exit
	.endif
#endif
	.endm

	.macro ct_user_enter, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
	.if	\save
	stmdb	sp!, {r0-r3, ip, lr}
	bl	context_tracking_user_enter
	ldmia	sp!, {r0-r3, ip, lr}
	.else
	bl	context_tracking_user_enter
	.endif
#endif
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number for Thumb mode.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r7		@ syscall number
tbl	.req	r8		@ syscall table pointer
why	.req	r8		@ Linux syscall (!= 0)
tsk	.req	r9		@ current thread_info
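
@ For illustration, the syscall dispatch in vector_swi uses these
@ aliases along these lines (a sketch, not the verbatim code):
@
@	adr	tbl, sys_call_table		@ load syscall table pointer
@	...
@	cmp	scno, #NR_syscalls		@ check upper syscall limit
@	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine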