1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 /* 3 * arch/arm/include/asm/assembler.h 4 * 5 * Copyright (C) 1996-2000 Russell King 6 * 7 * This file contains arm architecture specific defines 8 * for the different processors. 9 * 10 * Do not include any C declarations in this file - it is included by 11 * assembler source. 12 */ 13 #ifndef __ASM_ASSEMBLER_H__ 14 #define __ASM_ASSEMBLER_H__ 15 16 #ifndef __ASSEMBLY__ 17 #error "Only include this from assembly code" 18 #endif 19 20 #include <asm/ptrace.h> 21 #include <asm/opcodes-virt.h> 22 #include <asm/asm-offsets.h> 23 #include <asm/page.h> 24 #include <asm/thread_info.h> 25 #include <asm/uaccess-asm.h> 26 27 #define IOMEM(x) (x) 28 29 /* 30 * Endian independent macros for shifting bytes within registers. 31 */ 32 #ifndef __ARMEB__ 33 #define lspull lsr 34 #define lspush lsl 35 #define get_byte_0 lsl #0 36 #define get_byte_1 lsr #8 37 #define get_byte_2 lsr #16 38 #define get_byte_3 lsr #24 39 #define put_byte_0 lsl #0 40 #define put_byte_1 lsl #8 41 #define put_byte_2 lsl #16 42 #define put_byte_3 lsl #24 43 #else 44 #define lspull lsl 45 #define lspush lsr 46 #define get_byte_0 lsr #24 47 #define get_byte_1 lsr #16 48 #define get_byte_2 lsr #8 49 #define get_byte_3 lsl #0 50 #define put_byte_0 lsl #24 51 #define put_byte_1 lsl #16 52 #define put_byte_2 lsl #8 53 #define put_byte_3 lsl #0 54 #endif 55 56 /* Select code for any configuration running in BE8 mode */ 57 #ifdef CONFIG_CPU_ENDIAN_BE8 58 #define ARM_BE8(code...) code 59 #else 60 #define ARM_BE8(code...) 61 #endif 62 63 /* 64 * Data preload for architectures that support it 65 */ 66 #if __LINUX_ARM_ARCH__ >= 5 67 #define PLD(code...) code 68 #else 69 #define PLD(code...) 70 #endif 71 72 /* 73 * This can be used to enable code to cacheline align the destination 74 * pointer when bulk writing to memory. Experiments on StrongARM and 75 * XScale didn't show this a worthwhile thing to do when the cache is not 76 * set to write-allocate (this would need further testing on XScale when WA 77 * is used). 78 * 79 * On Feroceon there is much to gain however, regardless of cache mode. 80 */ 81 #ifdef CONFIG_CPU_FEROCEON 82 #define CALGN(code...) code 83 #else 84 #define CALGN(code...) 85 #endif 86 87 #define IMM12_MASK 0xfff 88 89 /* the frame pointer used for stack unwinding */ 90 ARM( fpreg .req r11 ) 91 THUMB( fpreg .req r7 ) 92 93 /* 94 * Enable and disable interrupts 95 */ 96 #if __LINUX_ARM_ARCH__ >= 6 97 .macro disable_irq_notrace 98 cpsid i 99 .endm 100 101 .macro enable_irq_notrace 102 cpsie i 103 .endm 104 #else 105 .macro disable_irq_notrace 106 msr cpsr_c, #PSR_I_BIT | SVC_MODE 107 .endm 108 109 .macro enable_irq_notrace 110 msr cpsr_c, #SVC_MODE 111 .endm 112 #endif 113 114 #if __LINUX_ARM_ARCH__ < 7 115 .macro dsb, args 116 mcr p15, 0, r0, c7, c10, 4 117 .endm 118 119 .macro isb, args 120 mcr p15, 0, r0, c7, c5, 4 121 .endm 122 #endif 123 124 .macro asm_trace_hardirqs_off, save=1 125 #if defined(CONFIG_TRACE_IRQFLAGS) 126 .if \save 127 stmdb sp!, {r0-r3, ip, lr} 128 .endif 129 bl trace_hardirqs_off 130 .if \save 131 ldmia sp!, {r0-r3, ip, lr} 132 .endif 133 #endif 134 .endm 135 136 .macro asm_trace_hardirqs_on, cond=al, save=1 137 #if defined(CONFIG_TRACE_IRQFLAGS) 138 /* 139 * actually the registers should be pushed and pop'd conditionally, but 140 * after bl the flags are certainly clobbered 141 */ 142 .if \save 143 stmdb sp!, {r0-r3, ip, lr} 144 .endif 145 bl\cond trace_hardirqs_on 146 .if \save 147 ldmia sp!, {r0-r3, ip, lr} 148 .endif 149 #endif 150 .endm 151 152 .macro disable_irq, save=1 153 disable_irq_notrace 154 asm_trace_hardirqs_off \save 155 .endm 156 157 .macro enable_irq 158 asm_trace_hardirqs_on 159 enable_irq_notrace 160 .endm 161 /* 162 * Save the current IRQ state and disable IRQs. Note that this macro 163 * assumes FIQs are enabled, and that the processor is in SVC mode. 164 */ 165 .macro save_and_disable_irqs, oldcpsr 166 #ifdef CONFIG_CPU_V7M 167 mrs \oldcpsr, primask 168 #else 169 mrs \oldcpsr, cpsr 170 #endif 171 disable_irq 172 .endm 173 174 .macro save_and_disable_irqs_notrace, oldcpsr 175 #ifdef CONFIG_CPU_V7M 176 mrs \oldcpsr, primask 177 #else 178 mrs \oldcpsr, cpsr 179 #endif 180 disable_irq_notrace 181 .endm 182 183 /* 184 * Restore interrupt state previously stored in a register. We don't 185 * guarantee that this will preserve the flags. 186 */ 187 .macro restore_irqs_notrace, oldcpsr 188 #ifdef CONFIG_CPU_V7M 189 msr primask, \oldcpsr 190 #else 191 msr cpsr_c, \oldcpsr 192 #endif 193 .endm 194 195 .macro restore_irqs, oldcpsr 196 tst \oldcpsr, #PSR_I_BIT 197 asm_trace_hardirqs_on cond=eq 198 restore_irqs_notrace \oldcpsr 199 .endm 200 201 /* 202 * Assembly version of "adr rd, BSYM(sym)". This should only be used to 203 * reference local symbols in the same assembly file which are to be 204 * resolved by the assembler. Other usage is undefined. 205 */ 206 .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo 207 .macro badr\c, rd, sym 208 #ifdef CONFIG_THUMB2_KERNEL 209 adr\c \rd, \sym + 1 210 #else 211 adr\c \rd, \sym 212 #endif 213 .endm 214 .endr 215 216 /* 217 * Get current thread_info. 218 */ 219 .macro get_thread_info, rd 220 /* thread_info is the first member of struct task_struct */ 221 get_current \rd 222 .endm 223 224 /* 225 * Increment/decrement the preempt count. 226 */ 227 #ifdef CONFIG_PREEMPT_COUNT 228 .macro inc_preempt_count, ti, tmp 229 ldr \tmp, [\ti, #TI_PREEMPT] @ get preempt count 230 add \tmp, \tmp, #1 @ increment it 231 str \tmp, [\ti, #TI_PREEMPT] 232 .endm 233 234 .macro dec_preempt_count, ti, tmp 235 ldr \tmp, [\ti, #TI_PREEMPT] @ get preempt count 236 sub \tmp, \tmp, #1 @ decrement it 237 str \tmp, [\ti, #TI_PREEMPT] 238 .endm 239 #else 240 .macro inc_preempt_count, ti, tmp 241 .endm 242 243 .macro dec_preempt_count, ti, tmp 244 .endm 245 #endif 246 247 #define USERL(l, x...) \ 248 9999: x; \ 249 .pushsection __ex_table,"a"; \ 250 .align 3; \ 251 .long 9999b,l; \ 252 .popsection 253 254 #define USER(x...) USERL(9001f, x) 255 256 #ifdef CONFIG_SMP 257 #define ALT_SMP(instr...) \ 258 9998: instr 259 /* 260 * Note: if you get assembler errors from ALT_UP() when building with 261 * CONFIG_THUMB2_KERNEL, you almost certainly need to use 262 * ALT_SMP( W(instr) ... ) 263 */ 264 #define ALT_UP(instr...) \ 265 .pushsection ".alt.smp.init", "a" ;\ 266 .align 2 ;\ 267 .long 9998b - . ;\ 268 9997: instr ;\ 269 .if . - 9997b == 2 ;\ 270 nop ;\ 271 .endif ;\ 272 .if . - 9997b != 4 ;\ 273 .error "ALT_UP() content must assemble to exactly 4 bytes";\ 274 .endif ;\ 275 .popsection 276 #define ALT_UP_B(label) \ 277 .pushsection ".alt.smp.init", "a" ;\ 278 .align 2 ;\ 279 .long 9998b - . ;\ 280 W(b) . + (label - 9998b) ;\ 281 .popsection 282 #else 283 #define ALT_SMP(instr...) 284 #define ALT_UP(instr...) instr 285 #define ALT_UP_B(label) b label 286 #endif 287 288 /* 289 * this_cpu_offset - load the per-CPU offset of this CPU into 290 * register 'rd' 291 */ 292 .macro this_cpu_offset, rd:req 293 #ifdef CONFIG_SMP 294 ALT_SMP(mrc p15, 0, \rd, c13, c0, 4) 295 #ifdef CONFIG_CPU_V6 296 ALT_UP_B(.L1_\@) 297 .L0_\@: 298 .subsection 1 299 .L1_\@: ldr_va \rd, __per_cpu_offset 300 b .L0_\@ 301 .previous 302 #endif 303 #else 304 mov \rd, #0 305 #endif 306 .endm 307 308 /* 309 * set_current - store the task pointer of this CPU's current task 310 */ 311 .macro set_current, rn:req, tmp:req 312 #if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP) 313 9998: mcr p15, 0, \rn, c13, c0, 3 @ set TPIDRURO register 314 #ifdef CONFIG_CPU_V6 315 ALT_UP_B(.L0_\@) 316 .subsection 1 317 .L0_\@: str_va \rn, __current, \tmp 318 b .L1_\@ 319 .previous 320 .L1_\@: 321 #endif 322 #else 323 str_va \rn, __current, \tmp 324 #endif 325 .endm 326 327 /* 328 * get_current - load the task pointer of this CPU's current task 329 */ 330 .macro get_current, rd:req 331 #if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP) 332 9998: mrc p15, 0, \rd, c13, c0, 3 @ get TPIDRURO register 333 #ifdef CONFIG_CPU_V6 334 ALT_UP_B(.L0_\@) 335 .subsection 1 336 .L0_\@: ldr_va \rd, __current 337 b .L1_\@ 338 .previous 339 .L1_\@: 340 #endif 341 #else 342 ldr_va \rd, __current 343 #endif 344 .endm 345 346 /* 347 * reload_current - reload the task pointer of this CPU's current task 348 * into the TLS register 349 */ 350 .macro reload_current, t1:req, t2:req 351 #if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP) 352 #ifdef CONFIG_CPU_V6 353 ALT_SMP(nop) 354 ALT_UP_B(.L0_\@) 355 #endif 356 ldr_this_cpu \t1, __entry_task, \t1, \t2 357 mcr p15, 0, \t1, c13, c0, 3 @ store in TPIDRURO 358 .L0_\@: 359 #endif 360 .endm 361 362 /* 363 * Instruction barrier 364 */ 365 .macro instr_sync 366 #if __LINUX_ARM_ARCH__ >= 7 367 isb 368 #elif __LINUX_ARM_ARCH__ == 6 369 mcr p15, 0, r0, c7, c5, 4 370 #endif 371 .endm 372 373 /* 374 * SMP data memory barrier 375 */ 376 .macro smp_dmb mode 377 #ifdef CONFIG_SMP 378 #if __LINUX_ARM_ARCH__ >= 7 379 .ifeqs "\mode","arm" 380 ALT_SMP(dmb ish) 381 .else 382 ALT_SMP(W(dmb) ish) 383 .endif 384 #elif __LINUX_ARM_ARCH__ == 6 385 ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb 386 #else 387 #error Incompatible SMP platform 388 #endif 389 .ifeqs "\mode","arm" 390 ALT_UP(nop) 391 .else 392 ALT_UP(W(nop)) 393 .endif 394 #endif 395 .endm 396 397 /* 398 * Raw SMP data memory barrier 399 */ 400 .macro __smp_dmb mode 401 #if __LINUX_ARM_ARCH__ >= 7 402 .ifeqs "\mode","arm" 403 dmb ish 404 .else 405 W(dmb) ish 406 .endif 407 #elif __LINUX_ARM_ARCH__ == 6 408 mcr p15, 0, r0, c7, c10, 5 @ dmb 409 #else 410 .error "Incompatible SMP platform" 411 #endif 412 .endm 413 414 #if defined(CONFIG_CPU_V7M) 415 /* 416 * setmode is used to assert to be in svc mode during boot. For v7-M 417 * this is done in __v7m_setup, so setmode can be empty here. 418 */ 419 .macro setmode, mode, reg 420 .endm 421 #elif defined(CONFIG_THUMB2_KERNEL) 422 .macro setmode, mode, reg 423 mov \reg, #\mode 424 msr cpsr_c, \reg 425 .endm 426 #else 427 .macro setmode, mode, reg 428 msr cpsr_c, #\mode 429 .endm 430 #endif 431 432 /* 433 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is 434 * a scratch register for the macro to overwrite. 435 * 436 * This macro is intended for forcing the CPU into SVC mode at boot time. 437 * you cannot return to the original mode. 438 */ 439 .macro safe_svcmode_maskall reg:req 440 #if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M) 441 mrs \reg , cpsr 442 eor \reg, \reg, #HYP_MODE 443 tst \reg, #MODE_MASK 444 bic \reg , \reg , #MODE_MASK 445 orr \reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE 446 THUMB( orr \reg , \reg , #PSR_T_BIT ) 447 bne 1f 448 orr \reg, \reg, #PSR_A_BIT 449 badr lr, 2f 450 msr spsr_cxsf, \reg 451 __MSR_ELR_HYP(14) 452 __ERET 453 1: msr cpsr_c, \reg 454 2: 455 #else 456 /* 457 * workaround for possibly broken pre-v6 hardware 458 * (akita, Sharp Zaurus C-1000, PXA270-based) 459 */ 460 setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg 461 #endif 462 .endm 463 464 /* 465 * STRT/LDRT access macros with ARM and Thumb-2 variants 466 */ 467 #ifdef CONFIG_THUMB2_KERNEL 468 469 .macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER() 470 9999: 471 .if \inc == 1 472 \instr\()b\t\cond\().w \reg, [\ptr, #\off] 473 .elseif \inc == 4 474 \instr\t\cond\().w \reg, [\ptr, #\off] 475 .else 476 .error "Unsupported inc macro argument" 477 .endif 478 479 .pushsection __ex_table,"a" 480 .align 3 481 .long 9999b, \abort 482 .popsection 483 .endm 484 485 .macro usracc, instr, reg, ptr, inc, cond, rept, abort 486 @ explicit IT instruction needed because of the label 487 @ introduced by the USER macro 488 .ifnc \cond,al 489 .if \rept == 1 490 itt \cond 491 .elseif \rept == 2 492 ittt \cond 493 .else 494 .error "Unsupported rept macro argument" 495 .endif 496 .endif 497 498 @ Slightly optimised to avoid incrementing the pointer twice 499 usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort 500 .if \rept == 2 501 usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort 502 .endif 503 504 add\cond \ptr, #\rept * \inc 505 .endm 506 507 #else /* !CONFIG_THUMB2_KERNEL */ 508 509 .macro usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER() 510 .rept \rept 511 9999: 512 .if \inc == 1 513 \instr\()b\t\cond \reg, [\ptr], #\inc 514 .elseif \inc == 4 515 \instr\t\cond \reg, [\ptr], #\inc 516 .else 517 .error "Unsupported inc macro argument" 518 .endif 519 520 .pushsection __ex_table,"a" 521 .align 3 522 .long 9999b, \abort 523 .popsection 524 .endr 525 .endm 526 527 #endif /* CONFIG_THUMB2_KERNEL */ 528 529 .macro strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f 530 usracc str, \reg, \ptr, \inc, \cond, \rept, \abort 531 .endm 532 533 .macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f 534 usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort 535 .endm 536 537 /* Utility macro for declaring string literals */ 538 .macro string name:req, string 539 .type \name , #object 540 \name: 541 .asciz "\string" 542 .size \name , . - \name 543 .endm 544 545 .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo 546 .macro ret\c, reg 547 #if __LINUX_ARM_ARCH__ < 6 548 mov\c pc, \reg 549 #else 550 .ifeqs "\reg", "lr" 551 bx\c \reg 552 .else 553 mov\c pc, \reg 554 .endif 555 #endif 556 .endm 557 .endr 558 559 .macro ret.w, reg 560 ret \reg 561 #ifdef CONFIG_THUMB2_KERNEL 562 nop 563 #endif 564 .endm 565 566 .macro bug, msg, line 567 #ifdef CONFIG_THUMB2_KERNEL 568 1: .inst 0xde02 569 #else 570 1: .inst 0xe7f001f2 571 #endif 572 #ifdef CONFIG_DEBUG_BUGVERBOSE 573 .pushsection .rodata.str, "aMS", %progbits, 1 574 2: .asciz "\msg" 575 .popsection 576 .pushsection __bug_table, "aw" 577 .align 2 578 .word 1b, 2b 579 .hword \line 580 .popsection 581 #endif 582 .endm 583 584 #ifdef CONFIG_KPROBES 585 #define _ASM_NOKPROBE(entry) \ 586 .pushsection "_kprobe_blacklist", "aw" ; \ 587 .balign 4 ; \ 588 .long entry; \ 589 .popsection 590 #else 591 #define _ASM_NOKPROBE(entry) 592 #endif 593 594 .macro __adldst_l, op, reg, sym, tmp, c 595 .if __LINUX_ARM_ARCH__ < 7 596 ldr\c \tmp, .La\@ 597 .subsection 1 598 .align 2 599 .La\@: .long \sym - .Lpc\@ 600 .previous 601 .else 602 .ifnb \c 603 THUMB( ittt \c ) 604 .endif 605 movw\c \tmp, #:lower16:\sym - .Lpc\@ 606 movt\c \tmp, #:upper16:\sym - .Lpc\@ 607 .endif 608 609 #ifndef CONFIG_THUMB2_KERNEL 610 .set .Lpc\@, . + 8 // PC bias 611 .ifc \op, add 612 add\c \reg, \tmp, pc 613 .else 614 \op\c \reg, [pc, \tmp] 615 .endif 616 #else 617 .Lb\@: add\c \tmp, \tmp, pc 618 /* 619 * In Thumb-2 builds, the PC bias depends on whether we are currently 620 * emitting into a .arm or a .thumb section. The size of the add opcode 621 * above will be 2 bytes when emitting in Thumb mode and 4 bytes when 622 * emitting in ARM mode, so let's use this to account for the bias. 623 */ 624 .set .Lpc\@, . + (. - .Lb\@) 625 626 .ifnc \op, add 627 \op\c \reg, [\tmp] 628 .endif 629 #endif 630 .endm 631 632 /* 633 * mov_l - move a constant value or [relocated] address into a register 634 */ 635 .macro mov_l, dst:req, imm:req, cond 636 .if __LINUX_ARM_ARCH__ < 7 637 ldr\cond \dst, =\imm 638 .else 639 movw\cond \dst, #:lower16:\imm 640 movt\cond \dst, #:upper16:\imm 641 .endif 642 .endm 643 644 /* 645 * adr_l - adr pseudo-op with unlimited range 646 * 647 * @dst: destination register 648 * @sym: name of the symbol 649 * @cond: conditional opcode suffix 650 */ 651 .macro adr_l, dst:req, sym:req, cond 652 __adldst_l add, \dst, \sym, \dst, \cond 653 .endm 654 655 /* 656 * ldr_l - ldr <literal> pseudo-op with unlimited range 657 * 658 * @dst: destination register 659 * @sym: name of the symbol 660 * @cond: conditional opcode suffix 661 */ 662 .macro ldr_l, dst:req, sym:req, cond 663 __adldst_l ldr, \dst, \sym, \dst, \cond 664 .endm 665 666 /* 667 * str_l - str <literal> pseudo-op with unlimited range 668 * 669 * @src: source register 670 * @sym: name of the symbol 671 * @tmp: mandatory scratch register 672 * @cond: conditional opcode suffix 673 */ 674 .macro str_l, src:req, sym:req, tmp:req, cond 675 __adldst_l str, \src, \sym, \tmp, \cond 676 .endm 677 678 .macro __ldst_va, op, reg, tmp, sym, cond, offset 679 #if __LINUX_ARM_ARCH__ >= 7 || \ 680 !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \ 681 (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS)) 682 mov_l \tmp, \sym, \cond 683 #else 684 /* 685 * Avoid a literal load, by emitting a sequence of ADD/LDR instructions 686 * with the appropriate relocations. The combined sequence has a range 687 * of -/+ 256 MiB, which should be sufficient for the core kernel and 688 * for modules loaded into the module region. 689 */ 690 .globl \sym 691 .reloc .L0_\@, R_ARM_ALU_PC_G0_NC, \sym 692 .reloc .L1_\@, R_ARM_ALU_PC_G1_NC, \sym 693 .reloc .L2_\@, R_ARM_LDR_PC_G2, \sym 694 .L0_\@: sub\cond \tmp, pc, #8 - \offset 695 .L1_\@: sub\cond \tmp, \tmp, #4 - \offset 696 .L2_\@: 697 #endif 698 \op\cond \reg, [\tmp, #\offset] 699 .endm 700 701 /* 702 * ldr_va - load a 32-bit word from the virtual address of \sym 703 */ 704 .macro ldr_va, rd:req, sym:req, cond, tmp, offset=0 705 .ifnb \tmp 706 __ldst_va ldr, \rd, \tmp, \sym, \cond, \offset 707 .else 708 __ldst_va ldr, \rd, \rd, \sym, \cond, \offset 709 .endif 710 .endm 711 712 /* 713 * str_va - store a 32-bit word to the virtual address of \sym 714 */ 715 .macro str_va, rn:req, sym:req, tmp:req, cond 716 __ldst_va str, \rn, \tmp, \sym, \cond, 0 717 .endm 718 719 /* 720 * ldr_this_cpu_armv6 - Load a 32-bit word from the per-CPU variable 'sym', 721 * without using a temp register. Supported in ARM mode 722 * only. 723 */ 724 .macro ldr_this_cpu_armv6, rd:req, sym:req 725 this_cpu_offset \rd 726 .globl \sym 727 .reloc .L0_\@, R_ARM_ALU_PC_G0_NC, \sym 728 .reloc .L1_\@, R_ARM_ALU_PC_G1_NC, \sym 729 .reloc .L2_\@, R_ARM_LDR_PC_G2, \sym 730 add \rd, \rd, pc 731 .L0_\@: sub \rd, \rd, #4 732 .L1_\@: sub \rd, \rd, #0 733 .L2_\@: ldr \rd, [\rd, #4] 734 .endm 735 736 /* 737 * ldr_this_cpu - Load a 32-bit word from the per-CPU variable 'sym' 738 * into register 'rd', which may be the stack pointer, 739 * using 't1' and 't2' as general temp registers. These 740 * are permitted to overlap with 'rd' if != sp 741 */ 742 .macro ldr_this_cpu, rd:req, sym:req, t1:req, t2:req 743 #ifndef CONFIG_SMP 744 ldr_va \rd, \sym, tmp=\t1 745 #elif __LINUX_ARM_ARCH__ >= 7 || \ 746 !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \ 747 (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS)) 748 this_cpu_offset \t1 749 mov_l \t2, \sym 750 ldr \rd, [\t1, \t2] 751 #else 752 ldr_this_cpu_armv6 \rd, \sym 753 #endif 754 .endm 755 756 /* 757 * rev_l - byte-swap a 32-bit value 758 * 759 * @val: source/destination register 760 * @tmp: scratch register 761 */ 762 .macro rev_l, val:req, tmp:req 763 .if __LINUX_ARM_ARCH__ < 6 764 eor \tmp, \val, \val, ror #16 765 bic \tmp, \tmp, #0x00ff0000 766 mov \val, \val, ror #8 767 eor \val, \val, \tmp, lsr #8 768 .else 769 rev \val, \val 770 .endif 771 .endm 772 773 .if __LINUX_ARM_ARCH__ < 6 774 .set .Lrev_l_uses_tmp, 1 775 .else 776 .set .Lrev_l_uses_tmp, 0 777 .endif 778 779 /* 780 * bl_r - branch and link to register 781 * 782 * @dst: target to branch to 783 * @c: conditional opcode suffix 784 */ 785 .macro bl_r, dst:req, c 786 .if __LINUX_ARM_ARCH__ < 6 787 mov\c lr, pc 788 mov\c pc, \dst 789 .else 790 blx\c \dst 791 .endif 792 .endm 793 794 #endif /* __ASM_ASSEMBLER_H__ */ 795