/*
 * linux/arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (c) 2003 ARM Limited
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Kernel startup code for all 32-bit CPUs
 */
#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/cp15.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/pgtable.h>

#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
#endif

/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 16K below KERNEL_RAM_VADDR. Therefore, we must
 * make sure that KERNEL_RAM_VADDR is correctly set. Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
 */
#define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif

#ifdef CONFIG_ARM_LPAE
	/* LPAE requires an additional page for the PGD */
#define PG_DIR_SIZE	0x5000
#define PMD_ORDER	3
#else
#define PG_DIR_SIZE	0x4000
#define PMD_ORDER	2
#endif

	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE

	.macro	pgtbl, rd, phys
	add	\rd, \phys, #TEXT_OFFSET
	sub	\rd, \rd, #PG_DIR_SIZE
	.endm

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code. The requirements
 * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
 * r1 = machine nr, r2 = atags or dtb pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or in extreme, well justified
 * circumstances, zImage) is for.
 */
	.arm

	__HEAD
ENTRY(stext)
 ARM_BE8(setend	be )			@ ensure we are in BE8 mode

 THUMB(	adr	r9, BSYM(1f)	)	@ Kernel is always entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install
#endif
	@ ensure svc mode and all interrupts masked
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
 THUMB(	it	eq )		@ force fixup-able long branch encoding
	beq	__error_p			@ yes, error 'p'

#ifdef CONFIG_ARM_LPAE
	mrc	p15, 0, r3, c0, c1, 4		@ read ID_MMFR0
	and	r3, r3, #0xf			@ extract VMSA support
	cmp	r3, #5				@ long-descriptor translation table format?
 THUMB(	it	lo )			@ force fixup-able long branch encoding
	blo	__error_lpae		@ only classic page table format
#endif

#ifndef CONFIG_XIP_KERNEL
	adr	r3, 2f
	ldmia	r3, {r4, r8}
	sub	r4, r3, r4			@ (PHYS_OFFSET - PAGE_OFFSET)
	add	r8, r8, r4			@ PHYS_OFFSET
#else
	ldr	r8, =PLAT_PHYS_OFFSET		@ always constant in this case
#endif

	/*
	 * r1 = machine no, r2 = atags or dtb,
	 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
	 */
	bl	__vet_atags
#ifdef CONFIG_SMP_ON_UP
	bl	__fixup_smp
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
	bl	__fixup_pv_table
#endif
	bl	__create_page_tables

	/*
	 * The following calls CPU specific code in a position independent
	 * manner. See arch/arm/mm/proc-*.S for details. r10 = base of
	 * xxx_proc_info structure selected by __lookup_processor_type
	 * above. On return, the CPU will be ready for the MMU to be
	 * turned on, and r0 will hold the CPU control register value.
	 */
	ldr	r13, =__mmap_switched		@ address to jump to after
						@ mmu has been enabled
	adr	lr, BSYM(1f)			@ return (PIC) address
	mov	r8, r4				@ set TTBR1 to swapper_pg_dir
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10
	ret	r12
1:	b	__enable_mmu
ENDPROC(stext)
	.ltorg
#ifndef CONFIG_XIP_KERNEL
2:	.long	.
	.long	PAGE_OFFSET
#endif

/*
 * Setup the initial page tables. We only setup the barest
 * amount which are required to get the kernel running, which
 * generally means mapping in the kernel code.
 *
 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
 *
 * Returns:
 *  r0, r3, r5-r7 corrupted
 *  r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h)
 */
__create_page_tables:
	pgtbl	r4, r8				@ page table address

	/*
	 * Clear the swapper page table
	 */
	mov	r0, r4
	mov	r3, #0
	add	r6, r0, #PG_DIR_SIZE
1:	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	teq	r0, r6
	bne	1b

#ifdef CONFIG_ARM_LPAE
	/*
	 * Build the PGD table (first level) to point to the PMD table. A PGD
	 * entry is 64-bit wide.
	 */
	mov	r0, r4
	add	r3, r4, #0x1000			@ first PMD table address
	orr	r3, r3, #3			@ PGD block type
	mov	r6, #4				@ PTRS_PER_PGD
	mov	r7, #1 << (55 - 32)		@ L_PGD_SWAPPER
1:
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4			@ set top PGD entry bits
	str	r3, [r0], #4			@ set bottom PGD entry bits
#else
	str	r3, [r0], #4			@ set bottom PGD entry bits
	str	r7, [r0], #4			@ set top PGD entry bits
#endif
	add	r3, r3, #0x1000			@ next PMD table
	subs	r6, r6, #1
	bne	1b

	add	r4, r4, #0x1000			@ point to the PMD tables
#ifdef CONFIG_CPU_ENDIAN_BE8
	add	r4, r4, #4			@ we only write the bottom word
#endif
#endif

	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

	/*
	 * Create identity mapping to cater for __enable_mmu.
	 * This identity mapping will be removed by paging_init().
	 */
	adr	r0, __turn_mmu_on_loc
	ldmia	r0, {r3, r5, r6}
	sub	r0, r0, r3			@ virt->phys offset
	add	r5, r5, r0			@ phys __turn_mmu_on
	add	r6, r6, r0			@ phys __turn_mmu_on_end
	mov	r5, r5, lsr #SECTION_SHIFT
	mov	r6, r6, lsr #SECTION_SHIFT

1:	orr	r3, r7, r5, lsl #SECTION_SHIFT	@ flags + kernel base
	str	r3, [r4, r5, lsl #PMD_ORDER]	@ identity mapping
	cmp	r5, r6
	addlo	r5, r5, #1			@ next section
	blo	1b

	/*
	 * Map our RAM from the start to the end of the kernel .bss section.
	 */
	add	r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
	ldr	r6, =(_end - 1)
	orr	r3, r8, r7
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:	str	r3, [r0], #1 << PMD_ORDER
	add	r3, r3, #1 << SECTION_SHIFT
	cmp	r0, r6
	bls	1b

#ifdef CONFIG_XIP_KERNEL
	/*
	 * Map the kernel image separately as it is not located in RAM.
	 */
#define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
	mov	r3, pc
	mov	r3, r3, lsr #SECTION_SHIFT
	orr	r3, r7, r3, lsl #SECTION_SHIFT
	add	r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
	str	r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
	ldr	r6, =(_edata_loc - 1)
	add	r0, r0, #1 << PMD_ORDER
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:	cmp	r0, r6
	add	r3, r3, #1 << SECTION_SHIFT
	strls	r3, [r0], #1 << PMD_ORDER
	bls	1b
#endif

	/*
	 * Then map boot params address in r2 if specified.
	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
	 */
	mov	r0, r2, lsr #SECTION_SHIFT
	movs	r0, r0, lsl #SECTION_SHIFT
	subne	r3, r0, r8
	addne	r3, r3, #PAGE_OFFSET
	addne	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
	orrne	r6, r7, r0
	strne	r6, [r3], #1 << PMD_ORDER
	addne	r6, r6, #1 << SECTION_SHIFT
	strne	r6, [r3]

#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
	sub	r4, r4, #4			@ Fixup page table pointer
						@ for 64-bit descriptors
#endif

#ifdef CONFIG_DEBUG_LL
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
	 * via a serial console before paging_init.
	 */
	addruart r7, r3, r0

	mov	r3, r3, lsr #SECTION_SHIFT
	mov	r3, r3, lsl #PMD_ORDER

	add	r0, r4, r3
	mov	r3, r7, lsr #SECTION_SHIFT
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
	orr	r3, r7, r3, lsl #SECTION_SHIFT
#ifdef CONFIG_ARM_LPAE
	mov	r7, #1 << (54 - 32)		@ XN
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4
	str	r3, [r0], #4
#else
	str	r3, [r0], #4
	str	r7, [r0], #4
#endif
#else
	orr	r3, r3, #PMD_SECT_XN
	str	r3, [r0], #4
#endif

#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
	/* we don't need any serial debugging mappings */
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
#endif

#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
	/*
	 * If we're using the NetWinder or CATS, we also need to map
	 * in the 16550-type serial port for the debug messages
	 */
	add	r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER)
	orr	r3, r7, #0x7c000000
	str	r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
	/*
	 * Map in screen at 0x02000000 & SCREEN2_BASE
	 * Similar reasons here - for debug. This is
	 * only for Acorn RiscPC architectures.
	 */
	add	r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER)
	orr	r3, r7, #0x02000000
	str	r3, [r0]
	add	r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
	str	r3, [r0]
#endif
#endif
#ifdef CONFIG_ARM_LPAE
	sub	r4, r4, #0x1000		@ point to the PGD table
	mov	r4, r4, lsr #ARCH_PGD_SHIFT
#endif
	ret	lr
ENDPROC(__create_page_tables)
	.ltorg
	.align
__turn_mmu_on_loc:
	.long	.
	.long	__turn_mmu_on
	.long	__turn_mmu_on_end

#if defined(CONFIG_SMP)
	.text
ENTRY(secondary_startup_arm)
	.arm
 THUMB(	adr	r9, BSYM(1f)	)	@ Kernel is entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled. Lookup
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */

 ARM_BE8(setend	be)			@ ensure we are in BE8 mode

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install_secondary
#endif
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type
	movs	r10, r5				@ invalid processor?
	moveq	r0, #'p'			@ yes, error 'p'
 THUMB(	it	eq )		@ force fixup-able long branch encoding
	beq	__error_p

	/*
	 * Use the page tables supplied from __cpu_up.
	 */
	adr	r4, __secondary_data
	ldmia	r4, {r5, r7, r12}		@ address to jump to after
	sub	lr, r4, r5			@ mmu has been enabled
	ldr	r4, [r7, lr]			@ get secondary_data.pgdir
	add	r7, r7, #4
	ldr	r8, [r7, lr]			@ get secondary_data.swapper_pg_dir
	adr	lr, BSYM(__enable_mmu)		@ return address
	mov	r13, r12			@ __secondary_switched address
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10			@ initialise processor
						@ (return control reg)
	ret	r12
ENDPROC(secondary_startup)
ENDPROC(secondary_startup_arm)

	/*
	 * r6 = &secondary_data
	 */
ENTRY(__secondary_switched)
	ldr	sp, [r7, #4]			@ get secondary_data.stack
	mov	fp, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)

	.align

	.type	__secondary_data, %object
__secondary_data:
	.long	.
	.long	secondary_data
	.long	__secondary_switched
#endif /* defined(CONFIG_SMP) */



/*
 * Setup common bits before finally enabling the MMU. Essentially
 * this is just loading the page table pointer and domain access
 * registers.
 *
 * r0 = cp#15 control register
 * r1 = machine ID
 * r2 = atags or dtb pointer
 * r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h)
 * r9 = processor ID
 * r13 = *virtual* address to jump to upon completion
 */
__enable_mmu:
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
	orr	r0, r0, #CR_A
#else
	bic	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
#ifndef CONFIG_ARM_LPAE
	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
#endif
	b	__turn_mmu_on
ENDPROC(__enable_mmu)

/*
 * Enable the MMU. This completely changes the structure of the visible
 * memory space. You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 * r0 = cp#15 control register
 * r1 = machine ID
 * r2 = atags or dtb pointer
 * r9 = processor ID
 * r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(__turn_mmu_on)
	mov	r0, r0
	instr_sync
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
	instr_sync
	mov	r3, r3
	mov	r3, r13
	ret	r3
__turn_mmu_on_end:
ENDPROC(__turn_mmu_on)
	.popsection


#ifdef CONFIG_SMP_ON_UP
	__HEAD
__fixup_smp:
	and	r3, r9, #0x000f0000	@ architecture version
	teq	r3, #0x000f0000		@ CPU ID supported?
	bne	__fixup_smp_on_up	@ no, assume UP

	bic	r3, r9, #0x00ff0000
	bic	r3, r3, #0x0000000f	@ mask 0xff00fff0
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000b000
	orr	r4, r4, #0x00000020	@ val 0x4100b020
	teq	r3, r4			@ ARM 11MPCore?
	reteq	lr			@ yes, assume SMP

	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
	and	r0, r0, #0xc0000000	@ multiprocessing extensions and
	teq	r0, #0x80000000		@ not part of a uniprocessor system?
	bne	__fixup_smp_on_up	@ no, assume UP

	@ Core indicates it is SMP. Check for Aegis SOC where a single
	@ Cortex-A9 CPU is present but SMP operations fault.
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000c000
	orr	r4, r4, #0x00000090
	teq	r3, r4			@ Check for ARM Cortex-A9
	retne	lr			@ Not ARM Cortex-A9,

	@ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
	@ below address check will need to be #ifdef'd or equivalent
	@ for the Aegis platform.
	mrc	p15, 4, r0, c15, c0	@ get SCU base address
	teq	r0, #0x0		@ '0' on actual UP A9 hardware
	beq	__fixup_smp_on_up	@ So its an A9 UP
	ldr	r0, [r0, #4]		@ read SCU Config
ARM_BE8(rev	r0, r0)			@ byteswap if big endian
	and	r0, r0, #0x3		@ number of CPUs
	teq	r0, #0x0		@ is 1?
	retne	lr

__fixup_smp_on_up:
	adr	r0, 1f
	ldmia	r0, {r3 - r5}
	sub	r3, r0, r3
	add	r4, r4, r3
	add	r5, r5, r3
	b	__do_fixup_smp_on_up
ENDPROC(__fixup_smp)

	.align
1:	.word	.
	.word	__smpalt_begin
	.word	__smpalt_end

	.pushsection .data
	.globl	smp_on_up
smp_on_up:
	ALT_SMP(.long	1)
	ALT_UP(.long	0)
	.popsection
#endif

	.text
__do_fixup_smp_on_up:
	cmp	r4, r5
	reths	lr
	ldmia	r4!, {r0, r6}
 ARM(	str	r6, [r0, r3]	)
 THUMB(	add	r0, r0, r3	)
#ifdef __ARMEB__
 THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
#endif
 THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
 THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
 THUMB(	strh	r6, [r0]	)
	b	__do_fixup_smp_on_up
ENDPROC(__do_fixup_smp_on_up)

ENTRY(fixup_smp)
	stmfd	sp!, {r4 - r6, lr}
	mov	r4, r0
	add	r5, r0, r1
	mov	r3, #0
	bl	__do_fixup_smp_on_up
	ldmfd	sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)

#ifdef __ARMEB__
#define LOW_OFFSET	0x4
#define HIGH_OFFSET	0x0
#else
#define LOW_OFFSET	0x0
#define HIGH_OFFSET	0x4
#endif

#ifdef CONFIG_ARM_PATCH_PHYS_VIRT

/* __fixup_pv_table - patch the stub instructions with the delta between
 * PHYS_OFFSET and PAGE_OFFSET, which is assumed to be 16MiB aligned and
 * can be expressed by an immediate shifter operand. The stub instruction
 * has a form of '(add|sub) rd, rn, #imm'.
 */
	__HEAD
__fixup_pv_table:
	adr	r0, 1f
	ldmia	r0, {r3-r7}
	mvn	ip, #0
	subs	r3, r0, r3	@ PHYS_OFFSET - PAGE_OFFSET
	add	r4, r4, r3	@ adjust table start address
	add	r5, r5, r3	@ adjust table end address
	add	r6, r6, r3	@ adjust __pv_phys_pfn_offset address
	add	r7, r7, r3	@ adjust __pv_offset address
	mov	r0, r8, lsr #PAGE_SHIFT	@ convert to PFN
	str	r0, [r6]	@ save computed PHYS_OFFSET to __pv_phys_pfn_offset
	strcc	ip, [r7, #HIGH_OFFSET]	@ save to __pv_offset high bits
	mov	r6, r3, lsr #24	@ constant for add/sub instructions
	teq	r3, r6, lsl #24 @ must be 16MiB aligned
THUMB(	it	ne		@ cross section branch )
	bne	__error
	str	r3, [r7, #LOW_OFFSET]	@ save to __pv_offset low bits
	b	__fixup_a_pv_table
ENDPROC(__fixup_pv_table)

	.align
1:	.long	.
	.long	__pv_table_begin
	.long	__pv_table_end
2:	.long	__pv_phys_pfn_offset
	.long	__pv_offset

	.text
__fixup_a_pv_table:
	adr	r0, 3f
	ldr	r6, [r0]
	add	r6, r6, r3
	ldr	r0, [r6, #HIGH_OFFSET]	@ pv_offset high word
	ldr	r6, [r6, #LOW_OFFSET]	@ pv_offset low word
	mov	r6, r6, lsr #24
	cmn	r0, #1
#ifdef CONFIG_THUMB2_KERNEL
	moveq	r0, #0x200000	@ set bit 21, mov to mvn instruction
	lsls	r6, #24
	beq	2f
	clz	r7, r6
	lsr	r6, #24
	lsl	r6, r7
	bic	r6, #0x0080
	lsrs	r7, #1
	orrcs	r6, #0x0080
	orr	r6, r6, r7, lsl #12
	orr	r6, #0x4000
	b	2f
1:	add	r7, r3
	ldrh	ip, [r7, #2]
ARM_BE8(rev16	ip, ip)
	tst	ip, #0x4000
	and	ip, #0x8f00
	orrne	ip, r6	@ mask in offset bits 31-24
	orreq	ip, r0	@ mask in offset bits 7-0
ARM_BE8(rev16	ip, ip)
	strh	ip, [r7, #2]
	bne	2f
	ldrh	ip, [r7]
ARM_BE8(rev16	ip, ip)
	bic	ip, #0x20
	orr	ip, ip, r0, lsr #16
ARM_BE8(rev16	ip, ip)
	strh	ip, [r7]
2:	cmp	r4, r5
	ldrcc	r7, [r4], #4	@ use branch for delay slot
	bcc	1b
	bx	lr
#else
#ifdef CONFIG_CPU_ENDIAN_BE8
	moveq	r0, #0x00004000	@ set bit 22, mov to mvn instruction
#else
	moveq	r0, #0x400000	@ set bit 22, mov to mvn instruction
#endif
	b	2f
1:	ldr	ip, [r7, r3]
#ifdef CONFIG_CPU_ENDIAN_BE8
	@ in BE8, we load data in BE, but instructions still in LE
	bic	ip, ip, #0xff000000
	tst	ip, #0x000f0000	@ check the rotation field
	orrne	ip, ip, r6, lsl #24	@ mask in offset bits 31-24
	biceq	ip, ip, #0x00004000	@ clear bit 22
	orreq	ip, ip, r0	@ mask in offset bits 7-0
#else
	bic	ip, ip, #0x000000ff
	tst	ip, #0xf00	@ check the rotation field
	orrne	ip, ip, r6	@ mask in offset bits 31-24
	biceq	ip, ip, #0x400000	@ clear bit 22
	orreq	ip, ip, r0	@ mask in offset bits 7-0
#endif
	str	ip, [r7, r3]
2:	cmp	r4, r5
	ldrcc	r7, [r4], #4	@ use branch for delay slot
	bcc	1b
	ret	lr
#endif
ENDPROC(__fixup_a_pv_table)

	.align
3:	.long	__pv_offset

ENTRY(fixup_pv_table)
	stmfd	sp!, {r4 - r7, lr}
	mov	r3, #0			@ no offset
	mov	r4, r0			@ r0 = table start
	add	r5, r0, r1		@ r1 = table size
	bl	__fixup_a_pv_table
	ldmfd	sp!, {r4 - r7, pc}
ENDPROC(fixup_pv_table)

	.data
	.globl	__pv_phys_pfn_offset
	.type	__pv_phys_pfn_offset, %object
__pv_phys_pfn_offset:
	.word	0
	.size	__pv_phys_pfn_offset, . -__pv_phys_pfn_offset

	.globl	__pv_offset
	.type	__pv_offset, %object
__pv_offset:
	.quad	0
	.size	__pv_offset, . -__pv_offset
#endif

#include "head-common.S"