/*
 *  linux/arch/arm/kernel/head.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (c) 2003 ARM Limited
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Kernel startup code for all 32-bit CPUs
 */
#include <linux/config.h>
#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/domain.h>
#include <asm/mach-types.h>
#include <asm/procinfo.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/system.h>

#define PROCINFO_MMUFLAGS	8
#define PROCINFO_INITFUNC	12

#define MACHINFO_TYPE		0
#define MACHINFO_PHYSRAM	4
#define MACHINFO_PHYSIO		8
#define MACHINFO_PGOFFIO	12
#define MACHINFO_NAME		16

/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 16K below KERNEL_RAM_ADDR.  Therefore, we must
 * make sure that KERNEL_RAM_ADDR is correctly set.  Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_ADDR >= PAGE_OFFSET + 0x4000.
 */
#if (KERNEL_RAM_ADDR & 0xffff) != 0x8000
#error KERNEL_RAM_ADDR must start at 0xXXXX8000
#endif

	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_ADDR - 0x4000

	.macro	pgtbl, rd
	ldr	\rd, =(__virt_to_phys(KERNEL_RAM_ADDR - 0x4000))
	.endm
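
/*
 * For illustration (assuming the common configuration where PAGE_OFFSET
 * is 0xc0000000 and the kernel image starts at the usual 0x8000 offset):
 * KERNEL_RAM_ADDR is 0xc0008000, so swapper_pg_dir sits at 0xc0004000,
 * and the 16K level 1 table (4096 entries of 4 bytes each) occupies the
 * region immediately below the kernel image.
 */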

#ifdef CONFIG_XIP_KERNEL
#define TEXTADDR  XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
#else
#define TEXTADDR  KERNEL_RAM_ADDR
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = don't care, r0 = 0,
 * r1 = machine nr.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or in extreme, well justified
 * circumstances, zImage) is for.
 */
	__INIT
	.type	stext, %function
ENTRY(stext)
	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ ensure svc mode
						@ and irqs disabled
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
	beq	__error_p			@ yes, error 'p'
	bl	__lookup_machine_type		@ r5=machinfo
	movs	r8, r5				@ invalid machine (r5=0)?
	beq	__error_a			@ yes, error 'a'
	bl	__create_page_tables

	/*
	 * The following calls CPU specific code in a position independent
	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
	 * xxx_proc_info structure selected by __lookup_processor_type
	 * above.  On return, the CPU will be ready for the MMU to be
	 * turned on, and r0 will hold the CPU control register value.
	 */
	ldr	r13, __switch_data		@ address to jump to after
						@ mmu has been enabled
	adr	lr, __enable_mmu		@ return (PIC) address
	add	pc, r10, #PROCINFO_INITFUNC

	.type	__switch_data, %object
__switch_data:
	.long	__mmap_switched
	.long	__data_loc			@ r4
	.long	__data_start			@ r5
	.long	__bss_start			@ r6
	.long	_end				@ r7
	.long	processor_id			@ r4
	.long	__machine_arch_type		@ r5
	.long	cr_alignment			@ r6
	.long	init_thread_union + THREAD_START_SP @ sp

/*
 * The following fragment of code is executed with the MMU on, and uses
 * absolute addresses; this is not position independent.
 *
 * r0 = cp#15 control register
 * r1 = machine ID
 * r9 = processor ID
 */
	.type	__mmap_switched, %function
__mmap_switched:
	adr	r3, __switch_data + 4

	ldmia	r3!, {r4, r5, r6, r7}
	cmp	r4, r5				@ Copy data segment if needed
1:	cmpne	r5, r6
	ldrne	fp, [r4], #4
	strne	fp, [r5], #4
	bne	1b

	mov	fp, #0				@ Clear BSS (and zero fp)
1:	cmp	r6, r7
	strcc	fp, [r6], #4
	bcc	1b

	ldmia	r3, {r4, r5, r6, sp}
	str	r9, [r4]			@ Save processor ID
	str	r1, [r5]			@ Save machine type
	bic	r4, r0, #CR_A			@ Clear 'A' bit
	stmia	r6, {r0, r4}			@ Save control register values
	b	start_kernel
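
/*
 * Note that the copy loop in __mmap_switched above only runs when
 * __data_loc and __data_start differ, i.e. when .data has a separate
 * load address (as with an XIP kernel executing from ROM) and must
 * first be copied into RAM.
 */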

#if defined(CONFIG_SMP)
	.type	secondary_startup, #function
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled.  Look up
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */
	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC
	bl	__lookup_processor_type
	movs	r10, r5				@ invalid processor?
	moveq	r0, #'p'			@ yes, error 'p'
	beq	__error

	/*
	 * Use the page tables supplied from __cpu_up.
	 */
	adr	r4, __secondary_data
	ldmia	r4, {r5, r6, r13}		@ address to jump to after
	sub	r4, r4, r5			@ mmu has been enabled
	ldr	r4, [r6, r4]			@ get secondary_data.pgdir
	adr	lr, __enable_mmu		@ return address
	add	pc, r10, #12			@ initialise processor
						@ (return control reg)

	/*
	 * r6 = &secondary_data
	 */
ENTRY(__secondary_switched)
	ldr	sp, [r6, #4]			@ get secondary_data.stack
	mov	fp, #0
	b	secondary_start_kernel

	.type	__secondary_data, %object
__secondary_data:
	.long	.
	.long	secondary_data
	.long	__secondary_switched
#endif /* defined(CONFIG_SMP) */



/*
 * Set up common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
 * registers.
 */
	.type	__enable_mmu, %function
__enable_mmu:
#ifdef CONFIG_ALIGNMENT_TRAP
	orr	r0, r0, #CR_A
#else
	bic	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
	b	__turn_mmu_on

/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 * r0  = cp#15 control register
 * r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
	.align	5
	.type	__turn_mmu_on, %function
__turn_mmu_on:
	mov	r0, r0
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
	mov	r3, r3
	mov	r3, r3
	mov	pc, r13



/*
 * Set up the initial page tables.  We only set up the bare minimum
 * required to get the kernel running, which generally means mapping
 * in the kernel code.
 *
 * r8  = machinfo
 * r9  = cpuid
 * r10 = procinfo
 *
 * Returns:
 *	r0, r3, r5, r6, r7 corrupted
 *	r4 = physical page table address
 */
	.type	__create_page_tables, %function
__create_page_tables:
	ldr	r5, [r8, #MACHINFO_PHYSRAM]	@ physram
	pgtbl	r4				@ page table address

	/*
	 * Clear the 16K level 1 swapper page table
	 */
	mov	r0, r4
	mov	r3, #0
	add	r6, r0, #0x4000
1:	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	teq	r0, r6
	bne	1b

	ldr	r7, [r10, #PROCINFO_MMUFLAGS]	@ mmuflags

	/*
	 * Create an identity mapping for the first MB of the kernel to
	 * cater for the MMU enable.  This identity mapping
	 * will be removed by paging_init().  We use our current program
	 * counter to determine the corresponding section base address.
	 */
	mov	r6, pc, lsr #20			@ start of kernel section
	orr	r3, r7, r6, lsl #20		@ flags + kernel base
	str	r3, [r4, r6, lsl #2]		@ identity mapping

	/*
	 * Now set up the page tables for our kernel direct
	 * mapped region.  We round TEXTADDR down to the
	 * nearest megabyte boundary.  It is assumed that
	 * the kernel fits within 4 contiguous 1MB sections.
	 */
	add	r0, r4, #(TEXTADDR & 0xff000000) >> 18	@ start of kernel
	str	r3, [r0, #(TEXTADDR & 0x00f00000) >> 18]!
	add	r3, r3, #1 << 20
	str	r3, [r0, #4]!			@ KERNEL + 1MB
	add	r3, r3, #1 << 20
	str	r3, [r0, #4]!			@ KERNEL + 2MB
	add	r3, r3, #1 << 20
	str	r3, [r0, #4]			@ KERNEL + 3MB

	/*
	 * Then map the first 1MB of RAM in case it contains our boot params.
	 */
	add	r0, r4, #PAGE_OFFSET >> 18
	orr	r6, r5, r7
	str	r6, [r0]

#ifdef CONFIG_XIP_KERNEL
	/*
	 * Map some RAM to cover our .data and .bss areas.
	 * Mapping 3MB should be plenty.
	 */
	sub	r3, r4, r5
	mov	r3, r3, lsr #20
	add	r0, r0, r3, lsl #2
	add	r6, r6, r3, lsl #20
	str	r6, [r0], #4
	add	r6, r6, #(1 << 20)
	str	r6, [r0], #4
	add	r6, r6, #(1 << 20)
	str	r6, [r0]
#endif

#ifdef CONFIG_DEBUG_LL
	bic	r7, r7, #0x0c			@ turn off cacheable
						@ and bufferable bits
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
	 * via a serial console before paging_init.
	 */
	ldr	r3, [r8, #MACHINFO_PGOFFIO]
	add	r0, r4, r3
	rsb	r3, r3, #0x4000			@ PTRS_PER_PGD*sizeof(long)
	cmp	r3, #0x0800			@ limit to 512MB
	movhi	r3, #0x0800
	add	r6, r0, r3
	ldr	r3, [r8, #MACHINFO_PHYSIO]
	orr	r3, r3, r7
1:	str	r3, [r0], #4
	add	r3, r3, #1 << 20
	teq	r0, r6
	bne	1b
#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
	/*
	 * If we're using the NetWinder, we need to map in
	 * the 16550-type serial port for the debug messages
	 */
	teq	r1, #MACH_TYPE_NETWINDER
	teqne	r1, #MACH_TYPE_CATS
	bne	1f
	add	r0, r4, #0xff000000 >> 18
	orr	r3, r7, #0x7c000000
	str	r3, [r0]
1:
#endif
#ifdef CONFIG_ARCH_RPC
	/*
	 * Map in screen at 0x02000000 & SCREEN2_BASE
	 * Similar reasons here - for debug.  This is
	 * only for Acorn RiscPC architectures.
	 */
	add	r0, r4, #0x02000000 >> 18
	orr	r3, r7, #0x02000000
	str	r3, [r0]
	add	r0, r4, #0xd8000000 >> 18
	str	r3, [r0]
#endif
#endif
	mov	pc, lr
	.ltorg
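
/*
 * A note on the section arithmetic in __create_page_tables above: each
 * of the 4096 level 1 entries is 4 bytes and maps a 1MB section, so the
 * entry for virtual address va lives at byte offset (va >> 20) * 4,
 * i.e. va >> 18, from the table base.  Hence addresses are shifted
 * right by 18 when indexing r4, and "lsr #20" / "lsl #20" convert
 * between addresses and section numbers.
 */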


/*
 * Exception handling.  Something went wrong and we can't proceed.  We
 * ought to tell the user, but since we don't have any guarantee that
 * we're even running on the right architecture, we do virtually nothing.
 *
 * If CONFIG_DEBUG_LL is set we try to print out something about the error
 * and hope for the best (useful if the bootloader fails to pass a proper
 * machine ID, for example).
 */

	.type	__error_p, %function
__error_p:
#ifdef CONFIG_DEBUG_LL
	adr	r0, str_p1
	bl	printascii
	b	__error
str_p1:	.asciz	"\nError: unrecognized/unsupported processor variant.\n"
	.align
#endif

	.type	__error_a, %function
__error_a:
#ifdef CONFIG_DEBUG_LL
	mov	r4, r1				@ preserve machine ID
	adr	r0, str_a1
	bl	printascii
	mov	r0, r4
	bl	printhex8
	adr	r0, str_a2
	bl	printascii
	adr	r3, 3f
	ldmia	r3, {r4, r5, r6}		@ get machine desc list
	sub	r4, r3, r4			@ get offset between virt&phys
	add	r5, r5, r4			@ convert virt addresses to
	add	r6, r6, r4			@ physical address space
1:	ldr	r0, [r5, #MACHINFO_TYPE]	@ get machine type
	bl	printhex8
	mov	r0, #'\t'
	bl	printch
	ldr	r0, [r5, #MACHINFO_NAME]	@ get machine name
	add	r0, r0, r4
	bl	printascii
	mov	r0, #'\n'
	bl	printch
	add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc
	cmp	r5, r6
	blo	1b
	adr	r0, str_a3
	bl	printascii
	b	__error
str_a1:	.asciz	"\nError: unrecognized/unsupported machine ID (r1 = 0x"
str_a2:	.asciz	").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
str_a3:	.asciz	"\nPlease check your kernel config and/or bootloader.\n"
	.align
#endif

	.type	__error, %function
__error:
#ifdef CONFIG_ARCH_RPC
/*
 * Turn the screen red on an error - RiscPC only.
 */
	mov	r0, #0x02000000
	mov	r3, #0x11
	orr	r3, r3, r3, lsl #8
	orr	r3, r3, r3, lsl #16
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
#endif
1:	mov	r0, r0
	b	1b


/*
 * Read the processor ID register (CP#15, CR0), and look it up in the
 * linker-built supported processor list.  Note that we can't use the
 * absolute addresses for the __proc_info lists since we aren't running
 * with the MMU on (and therefore, we are not in the correct address
 * space).  We have to calculate the offset.
 *
 * Returns:
 *	r3, r4, r6 corrupted
 *	r5 = proc_info pointer in physical address space
 *	r9 = cpuid
 */
	.type	__lookup_processor_type, %function
__lookup_processor_type:
	adr	r3, 3f
	ldmda	r3, {r5, r6, r9}
	sub	r3, r3, r9			@ get offset between virt&phys
	add	r5, r5, r3			@ convert virt addresses to
	add	r6, r6, r3			@ physical address space
	mrc	p15, 0, r9, c0, c0		@ get processor id
1:	ldmia	r5, {r3, r4}			@ value, mask
	and	r4, r4, r9			@ mask wanted bits
	teq	r3, r4
	beq	2f
	add	r5, r5, #PROC_INFO_SZ		@ sizeof(proc_info_list)
	cmp	r5, r6
	blo	1b
	mov	r5, #0				@ unknown processor
2:	mov	pc, lr
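
/*
 * The word at 3: below is assembled as ".long .", so it holds the
 * link-time (virtual) address of that label, whereas "adr r3, 3f"
 * above yields its current (physical) address.  Subtracting the two
 * gives the offset used to convert the virtual __proc_info and
 * __arch_info pointers into physical addresses here, in __error_a
 * and in __lookup_machine_type.
 */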

/*
 * This provides a C-API version of the above function.
 */
ENTRY(lookup_processor_type)
	stmfd	sp!, {r4 - r6, r9, lr}
	bl	__lookup_processor_type
	mov	r0, r5
	ldmfd	sp!, {r4 - r6, r9, pc}

/*
 * Look in include/asm-arm/procinfo.h and arch/arm/kernel/arch.[ch] for
 * more information about the __proc_info and __arch_info structures.
 */
	.long	__proc_info_begin
	.long	__proc_info_end
3:	.long	.
	.long	__arch_info_begin
	.long	__arch_info_end

/*
 * Look up the machine architecture in the linker-built list of
 * architectures.  Note that we can't use the absolute addresses for the
 * __arch_info lists since we aren't running with the MMU on (and
 * therefore, we are not in the correct address space).  We have to
 * calculate the offset.
 *
 * r1 = machine architecture number
 * Returns:
 *	r3, r4, r6 corrupted
 *	r5 = mach_info pointer in physical address space
 */
	.type	__lookup_machine_type, %function
__lookup_machine_type:
	adr	r3, 3b
	ldmia	r3, {r4, r5, r6}
	sub	r3, r3, r4			@ get offset between virt&phys
	add	r5, r5, r3			@ convert virt addresses to
	add	r6, r6, r3			@ physical address space
1:	ldr	r3, [r5, #MACHINFO_TYPE]	@ get machine type
	teq	r3, r1				@ matches loader number?
	beq	2f				@ found
	add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc
	cmp	r5, r6
	blo	1b
	mov	r5, #0				@ unknown machine
2:	mov	pc, lr

/*
 * This provides a C-API version of the above function.
 */
ENTRY(lookup_machine_type)
	stmfd	sp!, {r4 - r6, lr}
	mov	r1, r0
	bl	__lookup_machine_type
	mov	r0, r5
	ldmfd	sp!, {r4 - r6, pc}