/*
 * linux/arch/arm/boot/compressed/head.S
 *
 * Copyright (C) 1996-2002 Russell King
 * Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>

/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG

#if defined(CONFIG_DEBUG_ICEDCC)

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#elif defined(CONFIG_CPU_XSCALE)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c8, c0, 0
		.endm
#else
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c1, c0, 0
		.endm
#endif

#else

#include <mach/debug-macro.S>

		.macro	writeb, ch, rb
		senduart \ch, \rb
		.endm

#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x80000000	@ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
		add	\rb, \rb, #0x00050000	@ Ser3
#else
		add	\rb, \rb, #0x00010000	@ Ser1
#endif
		.endm
#elif defined(CONFIG_ARCH_S3C2410)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x50000000
		add	\rb, \rb, #0x4000 * CONFIG_S3C_LOWLEVEL_UART_PORT
		.endm
#else
		.macro	loadsp, rb, tmp
		addruart \rb, \tmp
		.endm
#endif
#endif
#endif

		.macro	kputc,val
		mov	r0, \val
		bl	putc
		.endm

		.macro	kphex,val,len
		mov	r0, \val
		mov	r1, #\len
		bl	phex
		.endm

		.macro	debug_reloc_start
#ifdef DEBUG
		kputc	#'\n'
		kphex	r6, 8		/* processor id */
		kputc	#':'
		kphex	r7, 8		/* architecture id */
#ifdef CONFIG_CPU_CP15
		kputc	#':'
		mrc	p15, 0, r0, c1, c0
		kphex	r0, 8		/* control reg */
#endif
		kputc	#'\n'
		kphex	r5, 8		/* decompressed kernel start */
		kputc	#'-'
		kphex	r9, 8		/* decompressed kernel end */
		kputc	#'>'
		kphex	r4, 8		/* kernel execution address */
		kputc	#'\n'
#endif
		.endm

		.macro	debug_reloc_end
#ifdef DEBUG
		kphex	r5, 8		/* end of kernel */
		kputc	#'\n'
		mov	r0, r4
		bl	memdump		/* dump 256 bytes at start of kernel */
#endif
		.endm

		.section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
		.align
		.arm				@ Always enter in ARM state
start:
		.type	start,#function
		.rept	7
		mov	r0, r0
		.endr
   ARM(		mov	r0, r0		)
   ARM(		b	1f		)
 THUMB(		adr	r12, BSYM(1f)	)
 THUMB(		bx	r12		)

		.word	0x016f2818		@ Magic numbers to help the loader
		.word	start			@ absolute load/run zImage address
		.word	_edata			@ zImage end address
 THUMB(		.thumb			)
1:		mov	r7, r1			@ save architecture ID
		mov	r8, r2			@ save atags pointer

#ifndef __ARM_ARCH_2__
		/*
		 * Booting from Angel - need to enter SVC mode and disable
		 * FIQs/IRQs (numeric definitions from angel arm.h source).
		 * We only do this if we were in user mode on entry.
		 */
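		/*
		 * Illustrative sketch only (not part of the build): the
		 * mode test below relies on the low CPSR mode bits being
		 * zero in user mode, roughly:
		 *
		 *	if ((cpsr & 3) == 0)	// user mode
		 *		angel_swi();	// ask Angel for SVC mode
		 */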
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
		bne	not_angel
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
 ARM(		swi	0x123456	)	@ angel_SWI_ARM
 THUMB(		svc	0xab		)	@ angel_SWI_THUMB
not_angel:
		mrs	r2, cpsr		@ turn off interrupts to
		orr	r2, r2, #0xc0		@ prevent angel from running
		msr	cpsr_c, r2
#else
		teqp	pc, #0x0c000003		@ turn off interrupts
#endif

		/*
		 * Note that some cache flushing and other stuff may
		 * be needed here - is there an Angel SWI call for this?
		 */

		/*
		 * some architecture specific code can be inserted
		 * by the linker here, but it should preserve r7, r8, and r9.
		 */

		.text

#ifdef CONFIG_AUTO_ZRELADDR
		@ determine final kernel image address
		mov	r4, pc
		and	r4, r4, #0xf8000000
		add	r4, r4, #TEXT_OFFSET
#else
		ldr	r4, =zreladdr
#endif

		bl	cache_on

restart:	adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r6, r9, r11, r12}
		ldr	sp, [r0, #28]

		/*
		 * We might be running at a different address.  We need
		 * to fix up various pointers.
		 */
		sub	r0, r0, r1		@ calculate the delta offset
		add	r6, r6, r0		@ _edata

#ifndef CONFIG_ZBOOT_ROM
		/* malloc space is above the relocated stack (64k max) */
		add	sp, sp, r0
		add	r10, sp, #0x10000
#else
		/*
		 * With ZBOOT_ROM the bss/stack is non relocatable,
		 * but someone could still run this code from RAM,
		 * in which case our reference is _edata.
		 */
		mov	r10, r6
#endif

/*
 * Check to see if we will overwrite ourselves.
 *   r4  = final kernel address
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 - 16k page directory >= r10 -> OK
 *   r4 + image length <= current position (pc) -> OK
 */
		add	r10, r10, #16384
		cmp	r4, r10
		bhs	wont_overwrite
		add	r10, r4, r9
   ARM(		cmp	r10, pc		)
 THUMB(		mov	lr, pc		)
 THUMB(		cmp	r10, lr		)
		bls	wont_overwrite

/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   r6  = _edata
 *   r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
		/*
		 * Bump to the next 256-byte boundary with the size of
		 * the relocation code added.  This avoids overwriting
		 * ourself when the offset is small.
		 */
		add	r10, r10, #((reloc_code_end - restart + 256) & ~255)
		bic	r10, r10, #255

		/* Get start of code we want to copy and align it down. */
		adr	r5, restart
		bic	r5, r5, #31

		sub	r9, r6, r5		@ size to copy
		add	r9, r9, #31		@ rounded up to a multiple
		bic	r9, r9, #31		@ ... of 32 bytes
		add	r6, r9, r5
		add	r9, r9, r10

1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
		cmp	r6, r5
		stmdb	r9!, {r0 - r3, r10 - r12, lr}
		bhi	1b

		/* Preserve offset to relocated code. */
		sub	r6, r9, r6

#ifndef CONFIG_ZBOOT_ROM
		/* cache_clean_flush may use the stack, so relocate it */
		add	sp, sp, r6
#endif

		bl	cache_clean_flush

		adr	r0, BSYM(restart)
		add	r0, r0, r6
		mov	pc, r0
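		/*
		 * Rough C equivalent of the relocation copy above
		 * (illustrative only): the destination is always above
		 * the source, so copying from the top down is safe even
		 * when the two ranges overlap:
		 *
		 *	u32 *src = end, *dst = end + delta/4;
		 *	while (src > start)
		 *		*--dst = *--src;  // 8 words per ldmdb/stmdb
		 */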

wont_overwrite:
/*
 * If delta is zero, we are running at the address we were linked at.
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 */
		teq	r0, #0
		beq	not_relocated
		add	r11, r11, r0
		add	r12, r12, r0

#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
		 * we need to fix up pointers into the BSS region.
		 * Note that the stack pointer has already been fixed up.
		 */
		add	r2, r2, r0
		add	r3, r3, r0

		/*
		 * Relocate all entries in the GOT table.
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, r12
		blo	1b
#else

		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, r12
		blo	1b
#endif

not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		str	r0, [r2], #4
		str	r0, [r2], #4
		str	r0, [r2], #4
		cmp	r2, r3
		blo	1b

/*
 * The C runtime environment should now be setup sufficiently.
 * Set up some pointers, and start decompressing.
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 */
		mov	r0, r4
		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max
		mov	r3, r7
		bl	decompress_kernel
		bl	cache_clean_flush
		bl	cache_off
		mov	r0, #0			@ must be zero
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer
		mov	pc, r4			@ call kernel

		.align	2
		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	_edata			@ r6
		.word	_image_size		@ r9
		.word	_got_start		@ r11
		.word	_got_end		@ ip
		.word	user_stack_end		@ sp
		.size	LC0, . - LC0

#ifdef CONFIG_ARCH_RPC
		.globl	params
params:		ldr	r0, =0x10000100		@ params_phys for RPC
		mov	pc, lr
		.ltorg
		.align
#endif

/*
 * Turn on the cache.  We need to setup some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r8 = atags pointer
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn
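		/*
		 * Illustrative note: r3 is the byte offset of the wanted
		 * method slot within a proc_types entry, as if each entry
		 * were (descriptive C, not a kernel API):
		 *
		 *	struct proc_type {		// five words each
		 *		u32 match, mask;	// offsets 0, 4
		 *		u32 on, off, flush;	// offsets 8, 12, 16
		 *	};
		 *
		 * call_cache_fn branches to <entry + r3> once an ID matches.
		 */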

/*
 * Initialize the highest priority protection region, PR7
 * to cover all 32bit address and cacheable and bufferable.
 */
__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c6, c7, 1

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....

		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mov	pc, lr

__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 0	@ access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		/*
		 * ?? ARMv3 MMU does not allow reading the control register,
		 * does this really work on ARMv3 MPU?
		 */
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		/* ?? this overwrites the value constructed above? */
		mov	r0, #0
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		/* ?? invalidate for the second time? */
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
		mov	r0, r3
		mov	r9, r0, lsr #18
		mov	r9, r9, lsl #18		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12
		orr	r1, r1, #3 << 10
		add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		orrhs	r1, r1, #0x0c		@ set cacheable, bufferable
		cmp	r1, r10			@ if virt > end of RAM
		bichs	r1, r1, #0x0c		@ clear cacheable, bufferable
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
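		/*
		 * Rough C equivalent of the loop above (illustrative only;
		 * pgd, ram_start and ram_end stand for r0, r9 and r10):
		 *
		 *	u32 desc = 0x12 | (3 << 10);	// section, AP
		 *	do {
		 *		if (desc >= ram_start)
		 *			desc |= 0x0c;	// cacheable, bufferable
		 *		if (desc >= ram_end)
		 *			desc &= ~0x0c;
		 *		*pgd++ = desc;		// 1:1 mapping
		 *		desc += 0x100000;	// next 1 MB section
		 *	} while (pgd != pgd_end);	// 4096 entries, 16k
		 */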
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
		mov	r1, #0x1e
		orr	r1, r1, #3 << 10
		mov	r2, pc
		mov	r2, r2, lsr #20
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0], #4
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr
ENDPROC(__setup_mmu)

__armv4_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
#ifdef CONFIG_CPU_ENDIAN_BE8
		orr	r0, r0, #1 << 25	@ big-endian page tables
#endif
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mov	pc, r12

__armv7_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
		tst	r11, #0xf		@ VMSA
		blne	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		tst	r11, #0xf		@ VMSA
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x003c		@ write buffer
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_ENDIAN_BE8
		orr	r0, r0, #1 << 25	@ big-endian page tables
#endif
		orrne	r0, r0, #1		@ MMU enabled
		movne	r1, #-1
		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
#endif
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

__fa526_cache_on:
		mov	r12, lr
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x1000		@ I-cache enable
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mov	pc, r12

__arm6_mmu_cache_on:
		mov	r12, lr
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		mov	r0, #0x30
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		mov	pc, r12

__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
		orr	r0, r0, #0x000d		@ Write buffer, mmu
#endif
		mov	r1, #-1
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		b	1f
		.align	5			@ cache line aligned
1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
#endif
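		/*
		 * Illustrative note: "r0, lsr #32" is constant zero in ARM
		 * mode, so the final instruction is effectively
		 * "sub pc, lr, #0", i.e. a plain return; folding the
		 * just-read control register into the pc computation
		 * creates the dependency that flushes the pipeline, as
		 * the comment above says.
		 */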

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 *  r1  = corrupted
 *  r2  = corrupted
 *  r3  = block offset
 *  r9  = corrupted
 *  r12 = corrupted
 */

call_cache_fn:	adr	r12, proc_types
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r9, c0, c0	@ get processor ID
#else
		ldr	r9, =CONFIG_PROCESSOR_ID
#endif
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r9		@ (real ^ match)
		tst	r1, r2			@       & mask
 ARM(		addeq	pc, r12, r3		) @ call cache function
 THUMB(		addeq	r12, r3			)
 THUMB(		moveq	pc, r12			) @ call cache function
		add	r12, r12, #4*5
		b	1b

/*
 * Table for cache operations.  This is basically:
 *  - CPU ID match
 *  - CPU ID mask
 *  - 'cache on' method instruction
 *  - 'cache off' method instruction
 *  - 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
		.align	2
		.type	proc_types,#object
proc_types:
		.word	0x41560600		@ ARM6/610
		.word	0xffffffe0
		W(b)	__arm6_mmu_cache_off	@ works, but slow
		W(b)	__arm6_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)
@		b	__arm6_mmu_cache_on		@ untested
@		b	__arm6_mmu_cache_off
@		b	__armv3_mmu_cache_flush

		.word	0x00000000		@ old ARM ID
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007000		@ ARM7/710
		.word	0xfff8fe00
		W(b)	__arm7_mmu_cache_off
		W(b)	__arm7_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41807200		@ ARM720T (writethrough)
		.word	0xffffff00
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007400		@ ARM74x
		.word	0xff00ff00
		W(b)	__armv3_mpu_cache_on
		W(b)	__armv3_mpu_cache_off
		W(b)	__armv3_mpu_cache_flush

		.word	0x41009400		@ ARM94x
		.word	0xff00ff00
		W(b)	__armv4_mpu_cache_on
		W(b)	__armv4_mpu_cache_off
		W(b)	__armv4_mpu_cache_flush

		.word	0x00007000		@ ARM7 IDs
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		@ Everything from here on will be the new ID system.
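		@ For example (illustrative): an sa1110 reports an ID of
		@ the form 0x6901b11x, and ((0x6901b11x ^ 0x6901b110)
		@ & 0xfffffff0) == 0, so its entry below matches any
		@ revision while rejecting every other core.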

		.word	0x4401a100		@ sa110 / sa1100
		.word	0xffffffe0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x6901b110		@ sa1110
		.word	0xfffffff0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56056900
		.word	0xffffff00		@ PXA9xx
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56158000		@ PXA168
		.word	0xfffff000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x56050000		@ Feroceon
		.word	0xff0f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
		/* this conflicts with the standard ARMv5TE entry */
		.long	0x41009260		@ Old Feroceon
		.long	0xff00fff0
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv5tej_mmu_cache_flush
#endif

		.word	0x66015261		@ FA526
		.word	0xff01fff1
		W(b)	__fa526_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__fa526_cache_flush

		@ These match on the architecture ID

		.word	0x00020000		@ ARMv4T
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00050000		@ ARMv5TE
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00060000		@ ARMv5TEJ
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x0007b000		@ ARMv6
		.word	0x000ff000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush

		.word	0x560f5810		@ Marvell PJ4 ARMv6
		.word	0xff0ffff0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush

		.word	0x000f0000		@ new CPU Id
		.word	0x000f0000
		W(b)	__armv7_mmu_cache_on
		W(b)	__armv7_mmu_cache_off
		W(b)	__armv7_mmu_cache_flush

		.word	0			@ unrecognised type
		.word	0
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.size	proc_types, . - proc_types
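		/*
		 * Layout note (illustrative): each entry above must stay
		 * exactly five words long, because call_cache_fn indexes
		 * into it by byte offset (#4*5 per entry).  W(b) forces a
		 * 32-bit branch under CONFIG_THUMB2_KERNEL, and every
		 * 16-bit "mov pc, lr" is padded with a THUMB( nop ) for
		 * the same reason.
		 */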

/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On exit,
 *  r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_off:	mov	r3, #12			@ cache_off function
		b	call_cache_fn

__armv4_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
		mov	pc, lr

__armv3_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

__armv4_mmu_cache_off:
#ifdef CONFIG_MMU
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
#endif
		mov	pc, lr

__armv7_mmu_cache_off:
		mrc	p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
		bic	r0, r0, #0x000d
#else
		bic	r0, r0, #0x000c
#endif
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r12, lr
		bl	__armv7_mmu_cache_flush
		mov	r0, #0
#ifdef CONFIG_MMU
		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
#endif
		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
		mcr	p15, 0, r0, c7, c10, 4	@ DSB
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

__arm6_mmu_cache_off:
		mov	r0, #0x00000030		@ ARM6 control reg.
		b	__armv3_mmu_cache_off

__arm7_mmu_cache_off:
		mov	r0, #0x00000070		@ ARM7 control reg.
		b	__armv3_mmu_cache_off

__armv3_mmu_cache_off:
		mcr	p15, 0, r0, c1, c0, 0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		mov	pc, lr
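		/*
		 * Note (illustrative): since ARMv3 cannot read the control
		 * register back, the ARM6/ARM7 variants above simply
		 * preload r0 with a complete replacement value and
		 * tail-call __armv3_mmu_cache_off, which writes r0 as-is.
		 */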

/*
 * Clean and flush the cache to maintain consistency.
 *
 * On exit,
 *  r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
 *  r4, r6, r7, r8
 */
		.align	5
cache_clean_flush:
		mov	r3, #16
		b	call_cache_fn

__armv4_mpu_cache_flush:
		mov	r2, #1
		mov	r3, #0
		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
		mov	r1, #7 << 5		@ 8 segments
1:		orr	r3, r1, #63 << 26	@ 64 entries
2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
		subs	r3, r3, #1 << 26
		bcs	2b			@ entries 63 to 0
		subs	r1, r1, #1 << 5
		bcs	1b			@ segments 7 to 0

		teq	r2, #0
		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
		mov	pc, lr

__fa526_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv6_mmu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv7_mmu_cache_flush:
		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
		mov	r10, #0
		beq	hierarchical
		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
		b	iflush
hierarchical:
		mcr	p15, 0, r10, c7, c10, 5	@ DMB
		stmfd	sp!, {r0-r7, r9-r11}
		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
		ands	r3, r0, #0x7000000	@ extract loc from clidr
		mov	r3, r3, lsr #23		@ left align loc bit field
		beq	finished		@ if loc is 0, then no need to clean
		mov	r10, #0			@ start clean at cache level 0
loop1:
		add	r2, r10, r10, lsr #1	@ work out 3x current cache level
		mov	r1, r0, lsr r2		@ extract cache type bits from clidr
		and	r1, r1, #7		@ mask of the bits for current cache only
		cmp	r1, #2			@ see what cache we have at this level
		blt	skip			@ skip if no cache, or just i-cache
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
		mcr	p15, 0, r10, c7, c5, 4	@ isb to sync the new cssr&csidr
		mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
		and	r2, r1, #7		@ extract the length of the cache lines
		add	r2, r2, #4		@ add 4 (line length offset)
		ldr	r4, =0x3ff
		ands	r4, r4, r1, lsr #3	@ find maximum number on the way size
		clz	r5, r4			@ find bit position of way size increment
		ldr	r7, =0x7fff
		ands	r7, r7, r1, lsr #13	@ extract max number of the index size
loop2:
		mov	r9, r4			@ create working copy of max way size
loop3:
 ARM(		orr	r11, r10, r9, lsl r5	) @ factor way and cache number into r11
 ARM(		orr	r11, r11, r7, lsl r2	) @ factor index number into r11
 THUMB(		lsl	r6, r9, r5		)
 THUMB(		orr	r11, r10, r6		) @ factor way and cache number into r11
 THUMB(		lsl	r6, r7, r2		)
 THUMB(		orr	r11, r11, r6		) @ factor index number into r11
		mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
		subs	r9, r9, #1		@ decrement the way
		bge	loop3
		subs	r7, r7, #1		@ decrement the index
		bge	loop2
skip:
		add	r10, r10, #2		@ increment cache number
		cmp	r3, r10
		bgt	loop1
finished:
		ldmfd	sp!, {r0-r7, r9-r11}
		mov	r10, #0			@ switch back to cache level 0
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
iflush:
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 4	@ ISB
		mov	pc, lr
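		/*
		 * Rough C outline of the set/way walk above (illustrative
		 * only; read_ccsidr, write_csselr, dccisw, isb and clz
		 * are descriptive helpers, not kernel APIs):
		 *
		 *	loc = (clidr >> 24) & 7;	// level of coherency
		 *	for (level = 0; level < loc; level++) {
		 *		type = (clidr >> (level * 3)) & 7;
		 *		if (type < 2)	// no cache or I-cache only
		 *			continue;
		 *		write_csselr(level << 1);	// D/unified
		 *		isb();
		 *		ccsidr = read_ccsidr();
		 *		shift = (ccsidr & 7) + 4;	// log2(line)
		 *		ways  = (ccsidr >> 3) & 0x3ff;	// ways - 1
		 *		sets  = (ccsidr >> 13) & 0x7fff;// sets - 1
		 *		for (way = ways; way >= 0; way--)
		 *			for (set = sets; set >= 0; set--)
		 *				dccisw((way << clz(ways)) |
		 *				       (set << shift) |
		 *				       (level << 1));
		 *	}
		 */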

__armv5tej_mmu_cache_flush:
1:		mrc	p15, 0, r15, c7, c14, 3	@ test,clean,invalidate D cache
		bne	1b
		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv4_mmu_cache_flush:
		mov	r2, #64*1024		@ default: 32K dcache size (*2)
		mov	r11, #32		@ default: 32 byte line size
		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
		teq	r3, r9			@ cache ID register present?
		beq	no_cache_id
		mov	r1, r3, lsr #18
		and	r1, r1, #7
		mov	r2, #1024
		mov	r2, r2, lsl r1		@ base dcache size *2
		tst	r3, #1 << 14		@ test M bit
		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
		mov	r3, r3, lsr #12
		and	r3, r3, #3
		mov	r11, #8
		mov	r11, r11, lsl r3	@ cache line size in bytes
no_cache_id:
		mov	r1, pc
		bic	r1, r1, #63		@ align to longest cache line
		add	r2, r1, r2
1:
 ARM(		ldr	r3, [r1], r11	)	@ s/w flush D cache
 THUMB(		ldr	r3, [r1]	)	@ s/w flush D cache
 THUMB(		add	r1, r1, r11	)
		teq	r1, r2
		bne	1b

		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
#ifdef DEBUG
		.align	2
		.type	phexbuf,#object
phexbuf:	.space	12
		.size	phexbuf, . - phexbuf

@ phex corrupts {r0, r1, r2, r3}
phex:		adr	r3, phexbuf
		mov	r2, #0
		strb	r2, [r3, r1]
1:		subs	r1, r1, #1
		movmi	r0, r3
		bmi	puts
		and	r2, r0, #15
		mov	r0, r0, lsr #4
		cmp	r2, #10
		addge	r2, r2, #7
		add	r2, r2, #'0'
		strb	r2, [r3, r1]
		b	1b

@ puts corrupts {r0, r1, r2, r3}
puts:		loadsp	r3, r1
1:		ldrb	r2, [r0], #1
		teq	r2, #0
		moveq	pc, lr
2:		writeb	r2, r3
		mov	r1, #0x00020000
3:		subs	r1, r1, #1
		bne	3b
		teq	r2, #'\n'
		moveq	r2, #'\r'
		beq	2b
		teq	r0, #0
		bne	1b
		mov	pc, lr

@ putc corrupts {r0, r1, r2, r3}
putc:
		mov	r2, r0
		mov	r0, #0
		loadsp	r3, r1
		b	2b

@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
memdump:	mov	r12, r0
		mov	r10, lr
		mov	r11, #0
2:		mov	r0, r11, lsl #2
		add	r0, r0, r12
		mov	r1, #8
		bl	phex
		mov	r0, #':'
		bl	putc
1:		mov	r0, #' '
		bl	putc
		ldr	r0, [r12, r11, lsl #2]
		mov	r1, #8
		bl	phex
		and	r0, r11, #7
		teq	r0, #3
		moveq	r0, #' '
		bleq	putc
		and	r0, r11, #7
		add	r11, r11, #1
		teq	r0, #7
		bne	1b
		mov	r0, #'\n'
		bl	putc
		cmp	r11, #64
		blt	2b
		mov	pc, r10
#endif

		.ltorg
reloc_code_end:

		.align
		.section ".stack", "aw", %nobits
user_stack:	.space	4096
user_stack_end:
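
/*
 * Illustrative C outline of the phex routine in the DEBUG block above
 * (descriptive only, not a kernel API): digits are stored back to
 * front into phexbuf, then emitted through puts:
 *
 *	void phex(u32 val, int len)
 *	{
 *		char buf[12];
 *
 *		buf[len] = '\0';
 *		while (len--) {
 *			u32 d = val & 15;
 *			buf[len] = d < 10 ? '0' + d : 'A' + d - 10;
 *			val >>= 4;
 *		}
 *		puts(buf);
 *	}
 */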