/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_GENERIC_IRQ_MULTI_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>
#include <asm/probes.h>

/*
 * Interrupt handling.
 */
	.macro	irq_handler
#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp
	badr	lr, 9997f
	ldr	pc, [r1]
#else
	arch_irq_handler_default
#endif
9997:
	.endm
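@ With CONFIG_GENERIC_IRQ_MULTI_HANDLER, the macro above dispatches through
@ a function pointer that platform code registers at boot via
@ set_handle_irq().  A rough C view of the contract, taken from the
@ handle_arch_irq declaration in <linux/irq.h> (a sketch, not a new
@ definition):
@
@	void (*handle_arch_irq)(struct pt_regs *regs);
@
@ r0 carries the pt_regs pointer and lr the 9997f return address, so the
@ indirect "ldr pc" behaves like handle_arch_irq(regs).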
	.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm

	.section	.entry.text,"ax",%progbits

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #PT_REGS_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

	get_thread_info tsk
	ldr	r0, [tsk, #TI_ADDR_LIMIT]
	mov	r1, #TASK_SIZE
	str	r1, [tsk, #TI_ADDR_LIMIT]
	str	r0, [sp, #SVC_ADDR_LIMIT]

	uaccess_save r0
	.if \uaccess
	uaccess_disable r0
	.endif

	.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endif
	.endm

	.align	5
__dabt_svc:
	svc_entry uaccess=0
	mov	r2, sp
	dabt_helper
 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
	irq_handler

#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

	svc_exit r5, irq = 1			@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	reteq	r8				@ go again
	b	1b
#endif

__und_fault:
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM,
	@ the PC will be pointing at the next instruction, and we have
	@ to subtract 4.  Otherwise, it is Thumb, and the PC will be
	@ pointing at the second half of the Thumb instruction.  We
	@ have to subtract 2.
	ldr	r2, [r0, #S_PC]
	sub	r2, r2, r1
	str	r2, [r0, #S_PC]
	b	do_undefinstr
ENDPROC(__und_fault)
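@ In C terms, __und_fault above amounts to the following sketch, with r0
@ holding the pt_regs pointer and r1 the correction worked out by the
@ caller (4 for ARM, 2 for Thumb):
@
@	regs->ARM_pc -= correction;
@	do_undefinstr(regs);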
	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry MAX_STACK_SIZE
#else
	svc_entry
#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]
#else
	mov	r1, #2
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
	blo	__und_svc_fault
	ldrh	r9, [r4]			@ bottom 16 bits
	add	r4, r4, #2
	str	r4, [sp, #S_PC]
	orr	r0, r9, r0, lsl #16
#endif
	badr	r9, __und_svc_finish
	mov	r2, r4
	bl	call_fpe

	mov	r1, #4				@ PC correction to apply
__und_svc_fault:
	mov	r0, sp				@ struct pt_regs *regs
	bl	__und_fault

__und_svc_finish:
	get_thread_info tsk
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
__fiq_svc:
	svc_entry trace=0
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * Abort mode handlers
 */

@
@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
@ and reuses the same macros. However in abort mode we must also
@ save/restore lr_abt and spsr_abt to make nested aborts safe.
@
	.align	5
__fiq_abt:
	svc_entry trace=0

 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	msr	cpsr_c, r0 )
	mov	r1, lr		@ Save lr_abt
	mrs	r2, spsr	@ Save spsr_abt, abort is now safe
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	msr	cpsr_c, r0 )
	stmfd	sp!, {r1 - r2}

	add	r0, sp, #8			@ struct pt_regs *regs
	bl	handle_fiq_as_nmi

	ldmfd	sp!, {r1 - r2}
 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	msr	cpsr_c, r0 )
	mov	lr, r1		@ Restore lr_abt, abort is unsafe
	msr	spsr_cxsf, r2	@ Restore spsr_abt
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	msr	cpsr_c, r0 )

	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_abt)

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, and so must
 * PT_REGS_SIZE be.
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
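@ The preprocessor check above is the assembly-side twin of a C-level
@ invariant on struct pt_regs.  As a hypothetical C sketch (not present
@ in ptrace.h, shown only to restate the rule):
@
@	_Static_assert(sizeof(struct pt_regs) % 8 == 0,
@		       "EABI needs an 8-byte-aligned exception frame");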
	.macro	usr_entry, trace=1, uaccess=1
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #PT_REGS_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

 ATRAP(	mrc	p15, 0, r7, c1, c0, 0)
 ATRAP(	ldr	r8, .LCcralign)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

 ATRAP(	ldr	r8, [r8, #0])

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	.if \uaccess
	uaccess_disable ip
	.endif

	@ Enable the alignment trap while in kernel mode
 ATRAP(	teq	r8, r7)
 ATRAP(	mcrne	p15, 0, r8, c1, c0, 0)

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

	.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit save = 0
	.endif
	.endm

	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry uaccess=0
	kuser_cmpxchg_check
	mov	r2, sp
	dabt_helper
	b	ret_from_exception
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry uaccess=0

	mov	r2, r4
	mov	r3, r5

	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
	@      faulting instruction depending on Thumb mode.
	@ r3 = regs->ARM_cpsr
	@
	@ The emulation code returns using r9 if it has emulated the
	@ instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	badr	r9, ret_from_exception

	@ IRQs must be enabled before attempting to read the instruction from
	@ user space since that could cause a page/translation fault if the
	@ page table was modified by another CPU.
	enable_irq

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	__und_usr_thumb
	sub	r4, r2, #4			@ ARM instr at LR - 4
1:	ldrt	r0, [r4]
 ARM_BE8(rev	r0, r0)				@ little endian instruction

	uaccess_disable ip

	@ r0 = 32-bit ARM instruction which caused the exception
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction
	@ lr = 32-bit undefined instruction function
	badr	lr, __und_usr_fault_32
	b	call_fpe

__und_usr_thumb:
	@ Thumb instruction
	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
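@ The halfword comparisons against #0xe800 in the code below implement
@ the Thumb-2 length rule: a first halfword whose top five bits are
@ 0b11101, 0b11110 or 0b11111 introduces a 32-bit encoding.  As a C
@ sketch (hw1 assumed to be the first halfword fetched from user space):
@
@	bool is_32bit = (hw1 & 0xf800) >= 0xe800;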
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	ldr	r5, [r5]
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_fault_16		@ 16bit undefined instruction
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
2:	ldrht	r5, [r4]
ARM_BE8(rev16	r5, r5)				@ little endian instruction
	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_fault_16_pan		@ 16bit undefined instruction
3:	ldrht	r0, [r2]
ARM_BE8(rev16	r0, r0)				@ little endian instruction
	uaccess_disable ip
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
	orr	r0, r0, r5, lsl #16
	badr	lr, __und_usr_fault_32
	@ r0 = the two 16-bit Thumb instructions which caused the exception
	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
	@ r4 = PC value for the first 16-bit Thumb instruction
	@ lr = 32bit undefined instruction function

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
#else
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
	b	__und_usr_fault_16
#endif
 UNWIND(.fnend)
ENDPROC(__und_usr)

/*
 * The out of line fixup for the ldrt instructions above.
 */
	.pushsection .text.fixup, "ax"
	.align	2
4:	str	r4, [sp, #S_PC]			@ retry current instruction
	ret	r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them.  Plus, there are
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure
 *  lr  = unrecognised instruction return address
 * IRQs enabled, FIQs enabled.
 */
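@ The NEON scan in call_fpe below walks .LCneon_arm_opcodes or
@ .LCneon_thumb_opcodes, a zero-mask-terminated list of (mask, opcode)
@ word pairs.  Roughly, in C (names invented for illustration only):
@
@	for (p = neon_opcodes; p->mask != 0; p++)
@		if ((insn & p->mask) == p->opcode)
@			goto neon;	/* handled via do_vfp */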
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	get_thread_info r10			@ get current thread
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
	get_thread_info r10			@ get current thread
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:	ldr	r5, [r6], #4			@ mask value
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r5, #0				@ end mask?
	beq	1f
	and	r8, r0, r5
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	reteq	lr
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movscs	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	ret.w	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	ret.w	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	ret.w	lr				@ CP#4
	ret.w	lr				@ CP#5
	ret.w	lr				@ CP#6
#endif
	ret.w	lr				@ CP#7
	ret.w	lr				@ CP#8
	ret.w	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	ret.w	lr				@ CP#10 (VFP)
	ret.w	lr				@ CP#11 (VFP)
#endif
	ret.w	lr				@ CP#12
	ret.w	lr				@ CP#13
	ret.w	lr				@ CP#14 (Debug)
	ret.w	lr				@ CP#15 (Control)
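@ The "add pc, pc, r8, lsr #6" above is a computed branch into the
@ one-instruction-per-coprocessor table that follows it: r8 holds the CP
@ number shifted left by 8, so "lsr #6" yields CP# * 4, and in ARM mode
@ pc reads as the address of the add plus 8, which (just past the nop)
@ is exactly where the CP#0 entry sits.  Roughly, in C:
@
@	unsigned int cp = (insn >> 8) & 0xf;
@	branch_to(cp_table + 4 * cp);	/* each entry is one instruction */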
#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
	.align	2
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	ret	lr
ENDPROC(no_fp)

__und_usr_fault_32:
	mov	r1, #4
	b	1f
__und_usr_fault_16_pan:
	uaccess_disable ip
__und_usr_fault_16:
	mov	r1, #2
1:	mov	r0, sp
	badr	lr, ret_from_exception
	b	__und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)

	.align	5
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

	.align	5
__fiq_usr:
	usr_entry trace=0
	kuser_cmpxchg_check
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	get_thread_info tsk
	restore_user_regs fast = 0, offset = 0
 UNWIND(.fnend		)
ENDPROC(__fiq_usr)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
	ldr	r4, [r2, #TI_TP_VALUE]
	ldr	r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	.if (TSK_STACK_CANARY > IMM12_MASK)
	add	r7, r7, #TSK_STACK_CANARY & ~IMM12_MASK
	.endif
	ldr	r7, [r7, #TSK_STACK_CANARY & IMM12_MASK]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)

	__INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	ret	\reg
#endif
	.endm

	.macro	kuser_pad, sym, size
	.if	(. - \sym) & 3
	.rept	4 - (. - \sym) & 3
	.byte	0
	.endr
	.endif
	.rept	(\size - (. - \sym)) / 4
	.word	0xe7fddef1
	.endr
	.endm
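@ User space reaches the helpers below by calling fixed addresses in the
@ high vector page; Documentation/arm/kernel_user_helpers.txt gives the
@ formal prototypes.  For example, the 32-bit cmpxchg helper at
@ 0xffff0fc0 is typically invoked through a cast, as in this C sketch:
@
@	typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
@					volatile int *ptr);
@	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
@
@ It returns 0 (with the C flag set) on success, non-zero otherwise.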
#ifdef CONFIG_KUSER_HELPERS
	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eorseq	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if no then retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eorseq	r3, r1, r5			@ compare with oldval (2)
2:	stmiaeq	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from the kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbscs	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	ret	lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	kuser_pad __kuser_cmpxchg64, 64
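@ For reference, the documented user-space prototype of the helper above
@ (Documentation/arm/kernel_user_helpers.txt) is:
@
@	int __kuser_cmpxchg64(const int64_t *oldval,
@			      const int64_t *newval,
@			      volatile int64_t *ptr);	@ entry 0xffff0f60
@
@ It returns zero, with the C flag set, if *ptr was atomically changed
@ from *oldval to *newval, and non-zero otherwise.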
__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	kuser_pad __kuser_memory_barrier, 32

__kuser_cmpxchg:				@ 0xffff0fc0

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg32_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg64_fixup above.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbscs	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	ret	lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif

	kuser_pad __kuser_cmpxchg, 32

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	kuser_pad __kuser_get_tls, 16
	.rep	3
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

#endif
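@ The version word above lets user space probe which helpers the running
@ kernel provides; the count grows by one per 32-byte slot.  A C sketch
@ of the documented availability check (the threshold 2 corresponds to
@ __kuser_cmpxchg, per Documentation/arm/kernel_user_helpers.txt; the
@ fallback name is invented for illustration):
@
@	#define __kuser_helper_version (*(int32_t *)0xffff0ffc)
@
@	if (__kuser_helper_version < 2)
@		fall_back_to_non_atomic_path();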
 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff1000 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not exceed
 * a page size.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm

	.section .stubs, "ax", %progbits
	@ This must be the first word
	.word	vector_swi

vector_rst:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	b	vector_und

/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f
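@ Each vector_stub indexes the 16-entry table that follows it with the
@ low four mode bits of the interrupted context's SPSR ("and lr, lr,
@ #0x0f" in the macro above): entry 0 is a trap from USR mode, entry 3
@ a trap from SVC mode, and so on.  Roughly, in C:
@
@	handler = table[spsr & 0x0f];	/* USR=0, FIQ=1, IRQ=2, SVC=3 */
@	handler(regs);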
	.align	5

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*=============================================================================
 * FIQ "NMI" handler
 *-----------------------------------------------------------------------------
 * Handle a FIQ using the SVC stack, allowing FIQs to act like NMIs on x86
 * systems.
 */
	vector_stub	fiq, FIQ_MODE, 4

	.long	__fiq_usr			@  0  (USR_26 / USR_32)
	.long	__fiq_svc			@  1  (FIQ_26 / FIQ_32)
	.long	__fiq_svc			@  2  (IRQ_26 / IRQ_32)
	.long	__fiq_svc			@  3  (SVC_26 / SVC_32)
	.long	__fiq_svc			@  4
	.long	__fiq_svc			@  5
	.long	__fiq_svc			@  6
	.long	__fiq_abt			@  7
	.long	__fiq_svc			@  8
	.long	__fiq_svc			@  9
	.long	__fiq_svc			@  a
	.long	__fiq_svc			@  b
	.long	__fiq_svc			@  c
	.long	__fiq_svc			@  d
	.long	__fiq_svc			@  e
	.long	__fiq_svc			@  f

	.globl	vector_fiq

	.section .vectors, "ax", %progbits
.L__vectors_start:
	W(b)	vector_rst
	W(b)	vector_und
	W(ldr)	pc, .L__vectors_start + 0x1000
	W(b)	vector_pabt
	W(b)	vector_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_irq
	W(b)	vector_fiq

	.data
	.align	2

	.globl	cr_alignment
cr_alignment:
	.space	4
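@ Closing notes.  The SWI slot in the vector table above is an "ldr pc"
@ from the first word of the .stubs section, which is mapped exactly one
@ page (0x1000) above the vectors; that word holds the address of
@ vector_swi.  cr_alignment, just above, is the cached CP15 c1 control
@ register value consulted by the ATRAP code in usr_entry to re-enable
@ the alignment trap on kernel entry; its C-side view (assumed from the
@ declaration in <asm/cp15.h>) is simply:
@
@	extern unsigned long cr_alignment;	/* defined in entry-armv.S */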