/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>

/*
 * Interrupt handling.
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp
	ldr	r1, [r1]
	adr	lr, BSYM(9997f)
	teq	r1, #0
	movne	pc, r1
#endif
	arch_irq_handler_default
9997:
	.endm

	.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm

#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
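
/*
 * The SPFIX() sequences in svc_entry below keep the saved pt_regs 8-byte
 * aligned on EABI: sp is first dropped by S_FRAME_SIZE - 4 (r0 is pushed
 * separately later), the toggled bit 2 is tested to recover the original
 * alignment, and 4 bytes of padding are inserted when the interrupted sp
 * was only 4-byte aligned.  r2 is recomputed afterwards, including that
 * padding, so the value saved as sp_svc is the original sp.
 */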

	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endm

	.align	5
__dabt_svc:
	svc_entry
	mov	r2, sp
	dabt_helper

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ go again
	b	1b
#endif

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
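	@ A 64 byte stack hole covers the worst case: a simulated store
	@ of all 16 registers (16 x 4 bytes) below the probed sp.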
	svc_entry 64
#else
	svc_entry
#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]
#else
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	and	r9, r0, #0xf800
	cmp	r9, #0xe800			@ 32-bit instruction if xx >= 0
	ldrhhs	r9, [r4]			@ bottom 16 bits
	orrhs	r0, r9, r0, lsl #16
#endif
	adr	r9, BSYM(1f)
	mov	r2, r4
	bl	call_fpe

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
1:	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

#ifdef CONFIG_IRQSOFF_TRACER
	bl	trace_hardirqs_off
#endif
	.endm

	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
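	@ (r4 is the interrupted pc saved by usr_entry: user code runs
	@ below TASK_SIZE, so only a pc in the high vector page, where
	@ the helpers live, needs the out of line fixup.)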
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry
	kuser_cmpxchg_check
	mov	r2, sp
	dabt_helper
	b	ret_from_exception
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry

	mov	r2, r4
	mov	r3, r5

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
	adr	r9, BSYM(ret_from_exception)
	adr	lr, BSYM(__und_usr_unknown)
	tst	r3, #PSR_T_BIT			@ Thumb mode?
	itet	eq				@ explicit IT needed for the 1f label
	subeq	r4, r2, #4			@ ARM instr at LR - 4
	subne	r4, r2, #2			@ Thumb instr at LR - 2
1:	ldreqt	r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
	reveq	r0, r0				@ little endian instruction
#endif
	beq	call_fpe
	@ Thumb instruction
#if __LINUX_ARM_ARCH__ >= 7
2:
 ARM(	ldrht	r5, [r4], #2	)
 THUMB(	ldrht	r5, [r4]	)
 THUMB(	add	r4, r4, #2	)
	and	r0, r5, #0xf800			@ mask bits 111x x... .... ....
	cmp	r0, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_unknown
3:	ldrht	r0, [r4]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	orr	r0, r0, r5, lsl #16
#else
	b	__und_usr_unknown
#endif
 UNWIND(.fnend		)
ENDPROC(__und_usr)

	@
	@ fallthrough to call_fpe
	@

/*
 * The out of line fixup for the ldrt above.
 */
	.pushsection .fixup, "ax"
4:	mov	pc, r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if __LINUX_ARM_ARCH__ >= 7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them.  Plus, there's
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure.
 *  lr  = unrecognised instruction return address
 */
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:
	ldr	r7, [r6], #4			@ mask value
	cmp	r7, #0				@ end mask?
	beq	1f
	and	r8, r0, r7
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r8, r7				@ NEON instruction?
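	@ no match: try the next mask/opcode pair (a zero mask ends the table)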
	bne	2b
	get_thread_info r10
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	moveq	pc, lr
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	movw_pc	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	movw_pc	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	movw_pc	lr				@ CP#4
	movw_pc	lr				@ CP#5
	movw_pc	lr				@ CP#6
#endif
	movw_pc	lr				@ CP#7
	movw_pc	lr				@ CP#8
	movw_pc	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
#endif
	movw_pc	lr				@ CP#12
	movw_pc	lr				@ CP#13
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	mov	pc, lr
ENDPROC(no_fp)

__und_usr_unknown:
	enable_irq
	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	do_undefinstr
ENDPROC(__und_usr_unknown)

	.align	5
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
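 * The previous task_struct must be returned in r0, which is why r0 is
 * parked in r5 across the THREAD_NOTIFY_SWITCH notifier call and only
 * restored just before the next context's registers are reloaded.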
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	set_tls	r3, r4, r5
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)

	__INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm

	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg64
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg64

#elif defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if no then retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	bx	lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
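	 *
	 * User space enters the helper at 0xffff0f60; the calling
	 * convention is described in Documentation/arm/kernel_user_helpers.txt
	 * and is roughly (illustrative C only):
	 *
	 *	typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
	 *					  const int64_t *newval,
	 *					  volatile int64_t *ptr);
	 *	#define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)
	 *
	 * returning zero (with the C flag set) only if *ptr matched *oldval
	 * and was updated to *newval.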
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
2:	stmeqia	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from the kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	/* pad to next slot */
	.rept	(16 - (. - __kuser_cmpxchg64)/4)
	.word	0
	.endr

	.align	5

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	.align	5

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg32_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
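	@ Build in r7 the run-time (vector page) address of 1b: only the
	@ link-time offset 1b - __kuser_cmpxchg is meaningful, added to
	@ the fixed 0xffff0fc0 slot address.  The mov/sub pair is used
	@ because that constant will not fit a single ARM immediate.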
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif

	.align	5

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	.rep	4
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
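	@ (the target CPSR is built from the current one: XOR-ing with
	@  \mode ^ SVC_MODE flips only the mode bits to SVC, while
	@  PSR_ISETSTATE selects ARM or Thumb state for the kernel entry)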
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm

	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3 (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	disable_fiq
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5

.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start

	.globl	__vectors_start
__vectors_start:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	W(b)	vector_und + stubs_offset
	W(ldr)	pc, .LCvswi + stubs_offset
	W(b)	vector_pabt + stubs_offset
	W(b)	vector_dabt + stubs_offset
	W(b)	vector_addrexcptn + stubs_offset
	W(b)	vector_irq + stubs_offset
	W(b)	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif