/*
 *  linux/arch/arm/boot/compressed/head.S
 *
 *  Copyright (C) 1996-2002 Russell King
 *  Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

		.arch	armv7-a
/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG

#if defined(CONFIG_DEBUG_ICEDCC)

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#elif defined(CONFIG_CPU_XSCALE)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c8, c0, 0
		.endm
#else
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c1, c0, 0
		.endm
#endif

#else

#include CONFIG_DEBUG_LL_INCLUDE

		.macro	writeb,	ch, rb
		senduart \ch, \rb
		.endm

#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x80000000	@ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
		add	\rb, \rb, #0x00050000	@ Ser3
#else
		add	\rb, \rb, #0x00010000	@ Ser1
#endif
		.endm
#else
		.macro	loadsp,	rb, tmp
		addruart \rb, \tmp
		.endm
#endif
#endif
#endif

		.macro	kputc,val
		mov	r0, \val
		bl	putc
		.endm

		.macro	kphex,val,len
		mov	r0, \val
		mov	r1, #\len
		bl	phex
		.endm

		.macro	debug_reloc_start
#ifdef DEBUG
		kputc	#'\n'
		kphex	r6, 8		/* processor id */
		kputc	#':'
		kphex	r7, 8		/* architecture id */
#ifdef CONFIG_CPU_CP15
		kputc	#':'
		mrc	p15, 0, r0, c1, c0
		kphex	r0, 8		/* control reg */
#endif
		kputc	#'\n'
		kphex	r5, 8		/* decompressed kernel start */
		kputc	#'-'
		kphex	r9, 8		/* decompressed kernel end */
		kputc	#'>'
		kphex	r4, 8		/* kernel execution address */
		kputc	#'\n'
#endif
		.endm

		.macro	debug_reloc_end
#ifdef DEBUG
		kphex	r5, 8		/* end of kernel */
		kputc	#'\n'
		mov	r0, r4
		bl	memdump		/* dump 256 bytes at start of kernel */
#endif
		.endm

		.section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
		.align
		.arm				@ Always enter in ARM state
start:
		.type	start,#function
		.rept	7
		mov	r0, r0
		.endr
   ARM(		mov	r0, r0		)
   ARM(		b	1f		)
 THUMB(		adr	r12, BSYM(1f)	)
 THUMB(		bx	r12		)

		.word	_magic_sig	@ Magic numbers to help the loader
		.word	_magic_start	@ absolute load/run zImage address
		.word	_magic_end	@ zImage end address
		.word	0x04030201	@ endianness flag

 THUMB(		.thumb			)
1:
 ARM_BE8(	setend	be )			@ go BE8 if compiled for BE8
		mrs	r9, cpsr
#ifdef CONFIG_ARM_VIRT_EXT
		bl	__hyp_stub_install	@ get into SVC mode, reversibly
#endif
		mov	r7, r1			@ save architecture ID
		mov	r8, r2			@ save atags pointer

		/*
		 * Booting from Angel - need to enter SVC mode and disable
		 * FIQs/IRQs (numeric definitions from angel arm.h source).
		 * We only do this if we were in user mode on entry.
		 */
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
		bne	not_angel
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
 ARM(		swi	0x123456	)	@ angel_SWI_ARM
 THUMB(		svc	0xab		)	@ angel_SWI_THUMB
not_angel:
		safe_svcmode_maskall r0
		msr	spsr_cxsf, r9		@ Save the CPU boot mode in
						@ SPSR
		/*
		 * Note that some cache flushing and other stuff may
		 * be needed here - is there an Angel SWI call for this?
		 */

		/*
		 * some architecture specific code can be inserted
		 * by the linker here, but it should preserve r7, r8, and r9.
		 */

		.text

#ifdef CONFIG_AUTO_ZRELADDR
		@ determine final kernel image address
		mov	r4, pc
		and	r4, r4, #0xf8000000
		add	r4, r4, #TEXT_OFFSET
#else
		ldr	r4, =zreladdr
#endif

		/*
		 * Set up a page table only if it won't overwrite ourself.
		 * That means r4 < pc || r4 - 16k page directory > &_end.
		 * Given that r4 > &_end is most infrequent, we add a rough
		 * additional 1MB of room for a possible appended DTB.
		 */
		mov	r0, pc
		cmp	r0, r4
		ldrcc	r0, LC0+32
		addcc	r0, r0, pc
		cmpcc	r4, r0
		orrcc	r4, r4, #1		@ remember we skipped cache_on
		blcs	cache_on

restart:	adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r6, r10, r11, r12}
		ldr	sp, [r0, #28]

		/*
		 * We might be running at a different address.  We need
		 * to fix up various pointers.
		 */
		sub	r0, r0, r1		@ calculate the delta offset
		add	r6, r6, r0		@ _edata
		add	r10, r10, r0		@ inflated kernel size location

		/*
		 * The kernel build system appends the size of the
		 * decompressed kernel at the end of the compressed data
		 * in little-endian form.
		 */
		ldrb	r9, [r10, #0]
		ldrb	lr, [r10, #1]
		orr	r9, r9, lr, lsl #8
		ldrb	lr, [r10, #2]
		ldrb	r10, [r10, #3]
		orr	r9, r9, lr, lsl #16
		orr	r9, r9, r10, lsl #24

#ifndef CONFIG_ZBOOT_ROM
		/* malloc space is above the relocated stack (64k max) */
		add	sp, sp, r0
		add	r10, sp, #0x10000
#else
		/*
		 * With ZBOOT_ROM the bss/stack is non relocatable,
		 * but someone could still run this code from RAM,
		 * in which case our reference is _edata.
		 */
		mov	r10, r6
#endif

		mov	r5, #0			@ init dtb size to 0
#ifdef CONFIG_ARM_APPENDED_DTB
/*
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = final kernel address (possibly with LSB set)
 *   r5  = appended dtb size (still unknown)
 *   r6  = _edata
 *   r7  = architecture ID
 *   r8  = atags/device tree pointer
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 *
 * if there are device trees (dtb) appended to zImage, advance r10 so that the
 * dtb data will get relocated along with the kernel if necessary.
 */

		ldr	lr, [r6, #0]
#ifndef __ARMEB__
		ldr	r1, =0xedfe0dd0		@ sig is 0xd00dfeed big endian
#else
		ldr	r1, =0xd00dfeed
#endif
		cmp	lr, r1
		bne	dtb_check_done		@ not found

#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
		/*
		 * OK... Let's do some funky business here.
		 * If we do have a DTB appended to zImage, and we do have
		 * an ATAG list around, we want the latter to be translated
		 * and folded into the former here. No GOT fixup has occurred
		 * yet, but none of the code we're about to call uses any
		 * global variable.
		 */

		/* Get the initial DTB size */
		ldr	r5, [r6, #4]
#ifndef __ARMEB__
		/* convert to little endian */
		eor	r1, r5, r5, ror #16
		bic	r1, r1, #0x00ff0000
		mov	r5, r5, ror #8
		eor	r5, r5, r1, lsr #8
#endif
		/* 50% DTB growth should be good enough */
		add	r5, r5, r5, lsr #1
		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7
		/* clamp to 32KB min and 1MB max */
		cmp	r5, #(1 << 15)
		movlo	r5, #(1 << 15)
		cmp	r5, #(1 << 20)
		movhi	r5, #(1 << 20)
		/* temporarily relocate the stack past the DTB work space */
		add	sp, sp, r5

		stmfd	sp!, {r0-r3, ip, lr}
		mov	r0, r8
		mov	r1, r6
		mov	r2, r5
		bl	atags_to_fdt

		/*
		 * If returned value is 1, there is no ATAG at the location
		 * pointed by r8.  Try the typical 0x100 offset from start
		 * of RAM and hope for the best.
		 */
		cmp	r0, #1
		sub	r0, r4, #TEXT_OFFSET
		bic	r0, r0, #1
		add	r0, r0, #0x100
		mov	r1, r6
		mov	r2, r5
		bleq	atags_to_fdt

		ldmfd	sp!, {r0-r3, ip, lr}
		sub	sp, sp, r5
#endif

		mov	r8, r6			@ use the appended device tree

		/*
		 * Make sure that the DTB doesn't end up in the final
		 * kernel's .bss area. To do so, we adjust the decompressed
		 * kernel size to compensate if that .bss size is larger
		 * than the relocated code.
		 */
		ldr	r5, =_kernel_bss_size
		adr	r1, wont_overwrite
		sub	r1, r6, r1
		subs	r1, r5, r1
		addhi	r9, r9, r1

		/* Get the current DTB size */
		ldr	r5, [r6, #4]
#ifndef __ARMEB__
		/* convert r5 (dtb size) to little endian */
		eor	r1, r5, r5, ror #16
		bic	r1, r1, #0x00ff0000
		mov	r5, r5, ror #8
		eor	r5, r5, r1, lsr #8
#endif

		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7

		/* relocate some pointers past the appended dtb */
		add	r6, r6, r5
		add	r10, r10, r5
		add	sp, sp, r5
dtb_check_done:
#endif

/*
 * Check to see if we will overwrite ourselves.
 *   r4  = final kernel address (possibly with LSB set)
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 - 16k page directory >= r10 -> OK
 *   r4 + image length <= address of wont_overwrite -> OK
 * Note: the possible LSB in r4 is harmless here.
 */
		add	r10, r10, #16384
		cmp	r4, r10
		bhs	wont_overwrite
		add	r10, r4, r9
		adr	r9, wont_overwrite
		cmp	r10, r9
		bls	wont_overwrite

/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   r6  = _edata
 *   r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
		/*
		 * Bump to the next 256-byte boundary with the size of
		 * the relocation code added. This avoids overwriting
		 * ourself when the offset is small.
		 */
		add	r10, r10, #((reloc_code_end - restart + 256) & ~255)
		bic	r10, r10, #255

		/* Get start of code we want to copy and align it down. */
		adr	r5, restart
		bic	r5, r5, #31

/* Relocate the hyp vector base if necessary */
#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE
		bne	1f

		bl	__hyp_get_vectors
		sub	r0, r0, r5
		add	r0, r0, r10
		bl	__hyp_set_vectors
1:
#endif

		sub	r9, r6, r5		@ size to copy
		add	r9, r9, #31		@ rounded up to a multiple
		bic	r9, r9, #31		@ ... of 32 bytes
		add	r6, r9, r5
		add	r9, r9, r10

1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
		cmp	r6, r5
		stmdb	r9!, {r0 - r3, r10 - r12, lr}
		bhi	1b

		/* Preserve offset to relocated code. */
		sub	r6, r9, r6

#ifndef CONFIG_ZBOOT_ROM
		/* cache_clean_flush may use the stack, so relocate it */
		add	sp, sp, r6
#endif

		bl	cache_clean_flush

		adr	r0, BSYM(restart)
		add	r0, r0, r6
		mov	pc, r0

wont_overwrite:
/*
 * If delta is zero, we are running at the address we were linked at.
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = kernel execution address (possibly with LSB set)
 *   r5  = appended dtb size (0 if not present)
 *   r7  = architecture ID
 *   r8  = atags pointer
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 */
		orrs	r1, r0, r5
		beq	not_relocated

		add	r11, r11, r0
		add	r12, r12, r0

#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
		 * we need to fix up pointers into the BSS region.
		 * Note that the stack pointer has already been fixed up.
		 */
		add	r2, r2, r0
		add	r3, r3, r0

		/*
		 * Relocate all entries in the GOT table.
		 * Bump bss entries to _edata + dtb size
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ This fixes up C references
		cmp	r1, r2			@ if entry >= bss_start &&
		cmphs	r3, r1			@       bss_end > entry
		addhi	r1, r1, r5		@    entry += dtb size
		str	r1, [r11], #4		@ next entry
		cmp	r11, r12
		blo	1b

		/* bump our bss pointers too */
		add	r2, r2, r5
		add	r3, r3, r5

#else

		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, r12
		blo	1b
#endif

not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		str	r0, [r2], #4
		str	r0, [r2], #4
		str	r0, [r2], #4
		cmp	r2, r3
		blo	1b

		/*
		 * Did we skip the cache setup earlier?
		 * That is indicated by the LSB in r4.
		 * Do it now if so.
		 */
		tst	r4, #1
		bic	r4, r4, #1
		blne	cache_on

/*
 * The C runtime environment should now be set up sufficiently.
 * Set up some pointers, and start decompressing.
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 */
		mov	r0, r4
		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max
		mov	r3, r7
		bl	decompress_kernel
		bl	cache_clean_flush
		bl	cache_off
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer

#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr		@ Get saved CPU boot mode
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE		@ if not booted in HYP mode...
		bne	__enter_kernel		@ boot kernel directly

		adr	r12, .L__hyp_reentry_vectors_offset
		ldr	r0, [r12]
		add	r0, r0, r12

		bl	__hyp_set_vectors
		__HVC(0)			@ otherwise bounce to hyp mode

		b	.			@ should never be reached

		.align	2
.L__hyp_reentry_vectors_offset:	.long __hyp_reentry_vectors - .
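		/*
		 * The literal above holds the offset of __hyp_reentry_vectors
		 * relative to the literal's own address rather than an
		 * absolute address, so the adr/ldr/add sequence above can
		 * compute the real vector base wherever this position
		 * independent code happens to be running.
		 */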
#else
		b	__enter_kernel
#endif

		.align	2
		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	_edata			@ r6
		.word	input_data_end - 4	@ r10 (inflated size location)
		.word	_got_start		@ r11
		.word	_got_end		@ ip
		.word	.L_user_stack_end	@ sp
		.word	_end - restart + 16384 + 1024*1024
		.size	LC0, . - LC0

#ifdef CONFIG_ARCH_RPC
		.globl	params
params:		ldr	r0, =0x10000100		@ params_phys for RPC
		mov	pc, lr
		.ltorg
		.align
#endif

/*
 * Turn on the cache.  We need to set up some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r8 = atags pointer
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn

/*
 * Initialize the highest priority protection region, PR7
 * to cover all 32bit address and cacheable and bufferable.
 */
__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c6, c7, 1

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....

		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mov	pc, lr

__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 0	@ access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		/*
		 * ?? ARMv3 MMU does not allow reading the control register,
		 * does this really work on ARMv3 MPU?
		 */
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		/* ?? this overwrites the value constructed above? */
		mov	r0, #0
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		/* ?? invalidate for the second time? */
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define CB_BITS 0x08
#else
#define CB_BITS 0x0c
#endif

__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
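 * Each entry written by the loop below is a 1MB first-level section
 * descriptor: everything outside the assumed 256MB RAM window is left
 * uncacheable and unbufferable, while the RAM range gets the C/B
 * settings passed in via r6.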
 */
		mov	r0, r3
		mov	r9, r0, lsr #18
		mov	r9, r9, lsl #18		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12		@ XN|U + section mapping
		orr	r1, r1, #3 << 10	@ AP=11
		add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		cmphs	r10, r1			@   && end of RAM > virt
		bic	r1, r1, #0x1c		@ clear XN|U + C + B
		orrlo	r1, r1, #0x10		@ Set XN|U for non-RAM
		orrhs	r1, r1, r6		@ set RAM section settings
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
		orr	r1, r6, #0x04		@ ensure B is set for this
		orr	r1, r1, #3 << 10
		mov	r2, pc
		mov	r2, r2, lsr #20
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0], #4
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr
ENDPROC(__setup_mmu)

@ Enable unaligned access on v6, to allow better code generation
@ for the decompressor C code:
__armv6_mmu_cache_on:
		mrc	p15, 0, r0, c1, c0, 0	@ read SCTLR
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
		mcr	p15, 0, r0, c1, c0, 0	@ write SCTLR
		b	__armv4_mmu_cache_on

__arm926ejs_mmu_cache_on:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
		mov	r0, #4			@ put dcache in WT mode
		mcr	p15, 7, r0, c15, c0, 0
#endif

__armv4_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mov	pc, r12

__armv7_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
		tst	r11, #0xf		@ VMSA
		movne	r6, #CB_BITS | 0x02	@ !XN
		blne	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		tst	r11, #0xf		@ VMSA
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		bic	r0, r0, #1 << 28	@ clear SCTLR.TRE
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x003c		@ write buffer
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
						@ (needed for ARM1176)
#ifdef CONFIG_MMU
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		mrcne	p15, 0, r6, c2, c0, 2	@ read ttb control reg
		orrne	r0, r0, #1		@ MMU enabled
		movne	r1, #0xfffffffd		@ domain 0 = client
		bic	r6, r6, #1 << 31	@ 32-bit translation system
		bic	r6, r6, #3 << 0		@ use only ttbr0
		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
		mcrne	p15, 0, r6, c2, c0, 2	@ load ttb control
#endif
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

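/*
 * Faraday FA526: build the page tables, invalidate the cache and the
 * UTLB, enable the I-cache and then turn the MMU on via
 * __common_mmu_cache_on.
 */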
__fa526_cache_on:
		mov	r12, lr
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x1000		@ I-cache enable
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mov	pc, r12

__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
		orr	r0, r0, #0x000d		@ Write buffer, mmu
#endif
		mov	r1, #-1
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		b	1f
		.align	5			@ cache line aligned
1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
#endif

#define PROC_ENTRY_SIZE (4*5)

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 *  r1  = corrupted
 *  r2  = corrupted
 *  r3  = block offset
 *  r9  = corrupted
 *  r12 = corrupted
 */

call_cache_fn:	adr	r12, proc_types
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r9, c0, c0	@ get processor ID
#else
		ldr	r9, =CONFIG_PROCESSOR_ID
#endif
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r9		@ (real ^ match)
		tst	r1, r2			@       & mask
 ARM(		addeq	pc, r12, r3		) @ call cache function
 THUMB(		addeq	r12, r3			)
 THUMB(		moveq	pc, r12			) @ call cache function
		add	r12, r12, #PROC_ENTRY_SIZE
		b	1b

/*
 * Table for cache operations.  This is basically:
 *	- CPU ID match
 *	- CPU ID mask
 *	- 'cache on' method instruction
 *	- 'cache off' method instruction
 *	- 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
		.align	2
		.type	proc_types,#object
proc_types:
		.word	0x41000000		@ old ARM ID
		.word	0xff00f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007000		@ ARM7/710
		.word	0xfff8fe00
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41807200		@ ARM720T (writethrough)
		.word	0xffffff00
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007400		@ ARM74x
		.word	0xff00ff00
		W(b)	__armv3_mpu_cache_on
		W(b)	__armv3_mpu_cache_off
		W(b)	__armv3_mpu_cache_flush

		.word	0x41009400		@ ARM94x
		.word	0xff00ff00
		W(b)	__armv4_mpu_cache_on
		W(b)	__armv4_mpu_cache_off
		W(b)	__armv4_mpu_cache_flush

		.word	0x41069260		@ ARM926EJ-S (v5TEJ)
		.word	0xff0ffff0
		W(b)	__arm926ejs_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x00007000		@ ARM7 IDs
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		@ Everything from here on will be the new ID system.
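		@ (The main ID register encodes the implementer in bits
		@ [31:24], the architecture in bits [19:16] and the primary
		@ part number in bits [15:4]; the "architecture ID" entries
		@ further down match on bits [19:16] alone.)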

		.word	0x4401a100		@ sa110 / sa1100
		.word	0xffffffe0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x6901b110		@ sa1110
		.word	0xfffffff0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56056900
		.word	0xffffff00		@ PXA9xx
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56158000		@ PXA168
		.word	0xfffff000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x56050000		@ Feroceon
		.word	0xff0f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
		/* this conflicts with the standard ARMv5TE entry */
		.long	0x41009260		@ Old Feroceon
		.long	0xff00fff0
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv5tej_mmu_cache_flush
#endif

		.word	0x66015261		@ FA526
		.word	0xff01fff1
		W(b)	__fa526_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__fa526_cache_flush

		@ These match on the architecture ID

		.word	0x00020000		@ ARMv4T
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00050000		@ ARMv5TE
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00060000		@ ARMv5TEJ
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x0007b000		@ ARMv6
		.word	0x000ff000
		W(b)	__armv6_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush

		.word	0x000f0000		@ new CPU Id
		.word	0x000f0000
		W(b)	__armv7_mmu_cache_on
		W(b)	__armv7_mmu_cache_off
		W(b)	__armv7_mmu_cache_flush

		.word	0			@ unrecognised type
		.word	0
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.size	proc_types, . - proc_types

		/*
		 * If you get a "non-constant expression in ".if" statement"
		 * error from the assembler on this line, check that you have
		 * not accidentally written a "b" instruction where you should
		 * have written W(b).
		 */
		.if (. - proc_types) % PROC_ENTRY_SIZE != 0
		.error "The size of one or more proc_types entries is wrong."
		.endif

/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On exit,
 *  r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_off:	mov	r3, #12			@ cache_off function
		b	call_cache_fn

__armv4_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
		mov	pc, lr

__armv3_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

__armv4_mmu_cache_off:
#ifdef CONFIG_MMU
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
#endif
		mov	pc, lr

__armv7_mmu_cache_off:
		mrc	p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
		bic	r0, r0, #0x000d
#else
		bic	r0, r0, #0x000c
#endif
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r12, lr
		bl	__armv7_mmu_cache_flush
		mov	r0, #0
#ifdef CONFIG_MMU
		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
#endif
		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
		mcr	p15, 0, r0, c7, c10, 4	@ DSB
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

/*
 * Clean and flush the cache to maintain consistency.
 *
 * On exit,
 *  r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
 *  r4, r6, r7, r8
 */
		.align	5
cache_clean_flush:
		mov	r3, #16
		b	call_cache_fn

__armv4_mpu_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r2, #1
		mov	r3, #0
		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
		mov	r1, #7 << 5		@ 8 segments
1:		orr	r3, r1, #63 << 26	@ 64 entries
2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
		subs	r3, r3, #1 << 26
		bcs	2b			@ entries 63 to 0
		subs	r1, r1, #1 << 5
		bcs	1b			@ segments 7 to 0

		teq	r2, #0
		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
		mov	pc, lr

__fa526_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv6_mmu_cache_flush:
		mov	r1, #0
		tst	r4, #1
		mcreq	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
		mcreq	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv7_mmu_cache_flush:
		tst	r4, #1
		bne	iflush
		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
		mov	r10, #0
		beq	hierarchical
		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
		b	iflush
hierarchical:
		mcr	p15, 0, r10, c7, c10, 5	@ DMB
		stmfd	sp!, {r0-r7, r9-r11}
		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
		ands	r3, r0, #0x7000000	@ extract loc from clidr
		mov	r3, r3, lsr #23		@ left align loc bit field
		beq	finished		@ if loc is 0, then no need to clean
		mov	r10, #0			@ start clean at cache level 0
loop1:
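		@ Standard ARMv7 set/way loop: walk each cache level reported
		@ by CLIDR, read its geometry from CCSIDR, and clean and
		@ invalidate every set/way with the c7, c14, 2 (DCCISW) op.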
		add	r2, r10, r10, lsr #1	@ work out 3x current cache level
		mov	r1, r0, lsr r2		@ extract cache type bits from clidr
		and	r1, r1, #7		@ mask off the bits for current cache only
		cmp	r1, #2			@ see what cache we have at this level
		blt	skip			@ skip if no cache, or just i-cache
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
		mcr	p15, 0, r10, c7, c5, 4	@ isb to sync the new cssr&csidr
		mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
		and	r2, r1, #7		@ extract the length of the cache lines
		add	r2, r2, #4		@ add 4 (line length offset)
		ldr	r4, =0x3ff
		ands	r4, r4, r1, lsr #3	@ find maximum number on the way size
		clz	r5, r4			@ find bit position of way size increment
		ldr	r7, =0x7fff
		ands	r7, r7, r1, lsr #13	@ extract max number of the index size
loop2:
		mov	r9, r4			@ create working copy of max way size
loop3:
 ARM(		orr	r11, r10, r9, lsl r5	) @ factor way and cache number into r11
 ARM(		orr	r11, r11, r7, lsl r2	) @ factor index number into r11
 THUMB(		lsl	r6, r9, r5		)
 THUMB(		orr	r11, r10, r6		) @ factor way and cache number into r11
 THUMB(		lsl	r6, r7, r2		)
 THUMB(		orr	r11, r11, r6		) @ factor index number into r11
		mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
		subs	r9, r9, #1		@ decrement the way
		bge	loop3
		subs	r7, r7, #1		@ decrement the index
		bge	loop2
skip:
		add	r10, r10, #2		@ increment cache number
		cmp	r3, r10
		bgt	loop1
finished:
		ldmfd	sp!, {r0-r7, r9-r11}
		mov	r10, #0			@ switch back to cache level 0
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
iflush:
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 4	@ ISB
		mov	pc, lr

__armv5tej_mmu_cache_flush:
		tst	r4, #1
		movne	pc, lr
1:		mrc	p15, 0, r15, c7, c14, 3	@ test,clean,invalidate D cache
		bne	1b
		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv4_mmu_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r2, #64*1024		@ default: 32K dcache size (*2)
		mov	r11, #32		@ default: 32 byte line size
		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
		teq	r3, r9			@ cache ID register present?
		beq	no_cache_id
		mov	r1, r3, lsr #18
		and	r1, r1, #7
		mov	r2, #1024
		mov	r2, r2, lsl r1		@ base dcache size *2
		tst	r3, #1 << 14		@ test M bit
		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
		mov	r3, r3, lsr #12
		and	r3, r3, #3
		mov	r11, #8
		mov	r11, r11, lsl r3	@ cache line size in bytes
no_cache_id:
		mov	r1, pc
		bic	r1, r1, #63		@ align to longest cache line
		add	r2, r1, r2
1:
 ARM(		ldr	r3, [r1], r11	)	@ s/w flush D cache
 THUMB(		ldr	r3, [r1]	)	@ s/w flush D cache
 THUMB(		add	r1, r1, r11	)
		teq	r1, r2
		bne	1b

		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r1, #0
		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
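 * They are only assembled when DEBUG is defined and rely on the
 * loadsp/writeb macros selected at the top of this file.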
 */
#ifdef DEBUG
		.align	2
		.type	phexbuf,#object
phexbuf:	.space	12
		.size	phexbuf, . - phexbuf

@ phex corrupts {r0, r1, r2, r3}
phex:		adr	r3, phexbuf
		mov	r2, #0
		strb	r2, [r3, r1]
1:		subs	r1, r1, #1
		movmi	r0, r3
		bmi	puts
		and	r2, r0, #15
		mov	r0, r0, lsr #4
		cmp	r2, #10
		addge	r2, r2, #7
		add	r2, r2, #'0'
		strb	r2, [r3, r1]
		b	1b

@ puts corrupts {r0, r1, r2, r3}
puts:		loadsp	r3, r1
1:		ldrb	r2, [r0], #1
		teq	r2, #0
		moveq	pc, lr
2:		writeb	r2, r3
		mov	r1, #0x00020000
3:		subs	r1, r1, #1
		bne	3b
		teq	r2, #'\n'
		moveq	r2, #'\r'
		beq	2b
		teq	r0, #0
		bne	1b
		mov	pc, lr
@ putc corrupts {r0, r1, r2, r3}
putc:
		mov	r2, r0
		mov	r0, #0
		loadsp	r3, r1
		b	2b

@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
memdump:	mov	r12, r0
		mov	r10, lr
		mov	r11, #0
2:		mov	r0, r11, lsl #2
		add	r0, r0, r12
		mov	r1, #8
		bl	phex
		mov	r0, #':'
		bl	putc
1:		mov	r0, #' '
		bl	putc
		ldr	r0, [r12, r11, lsl #2]
		mov	r1, #8
		bl	phex
		and	r0, r11, #7
		teq	r0, #3
		moveq	r0, #' '
		bleq	putc
		and	r0, r11, #7
		add	r11, r11, #1
		teq	r0, #7
		bne	1b
		mov	r0, #'\n'
		bl	putc
		cmp	r11, #64
		blt	2b
		mov	pc, r10
#endif

		.ltorg

#ifdef CONFIG_ARM_VIRT_EXT
.align 5
__hyp_reentry_vectors:
		W(b)	.			@ reset
		W(b)	.			@ undef
		W(b)	.			@ svc
		W(b)	.			@ pabort
		W(b)	.			@ dabort
		W(b)	__enter_kernel		@ hyp
		W(b)	.			@ irq
		W(b)	.			@ fiq
#endif /* CONFIG_ARM_VIRT_EXT */

__enter_kernel:
		mov	r0, #0			@ must be 0
 ARM(		mov	pc, r4	)		@ call kernel
 THUMB(		bx	r4	)		@ entry point is always ARM

reloc_code_end:

		.align
		.section ".stack", "aw", %nobits
.L_user_stack:	.space	4096
.L_user_stack_end: