/*
 *  linux/arch/arm/kernel/head.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (c) 2003 ARM Limited
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Kernel startup code for all 32-bit CPUs
 */
#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/system.h>

#ifdef CONFIG_DEBUG_LL
#include <mach/debug-macro.S>
#endif

#if (PHYS_OFFSET & 0x001fffff)
#error "PHYS_OFFSET must be at an even 2MiB boundary!"
#endif

#define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)
#define KERNEL_RAM_PADDR	(PHYS_OFFSET + TEXT_OFFSET)


/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 16K below KERNEL_RAM_VADDR.  Therefore, we must
 * make sure that KERNEL_RAM_VADDR is correctly set.  Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
 */
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif

	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - 0x4000

	.macro	pgtbl, rd
	ldr	\rd, =(KERNEL_RAM_PADDR - 0x4000)
	.endm

#ifdef CONFIG_XIP_KERNEL
#define KERNEL_START	XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
#define KERNEL_END	_edata_loc
#else
#define KERNEL_START	KERNEL_RAM_VADDR
#define KERNEL_END	_end
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = don't care, r0 = 0,
 * r1 = machine nr, r2 = atags pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or in extreme, well justified
 * circumstances, zImage) is for.
 */
	__HEAD
ENTRY(stext)
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
						@ and irqs disabled
	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
	beq	__error_p			@ yes, error 'p'
	bl	__lookup_machine_type		@ r5=machinfo
	movs	r8, r5				@ invalid machine (r5=0)?
	beq	__error_a			@ yes, error 'a'
	bl	__vet_atags
#ifdef CONFIG_SMP_ON_UP
	bl	__fixup_smp
#endif
	bl	__create_page_tables

	/*
	 * The following calls CPU specific code in a position independent
	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
	 * xxx_proc_info structure selected by __lookup_machine_type
	 * above.  On return, the CPU will be ready for the MMU to be
	 * turned on, and r0 will hold the CPU control register value.
	 */
	ldr	r13, =__mmap_switched		@ address to jump to after
						@ mmu has been enabled
	adr	lr, BSYM(1f)			@ return (PIC) address
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
 THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
 THUMB(	mov	pc, r12				)
1:	b	__enable_mmu
ENDPROC(stext)
	.ltorg
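
/*
 * A note on the arithmetic used by __create_page_tables below: the
 * first-level table consists of 4096 word-sized section descriptors,
 * one per 1MiB of virtual address space (hence the 16K table).  The
 * entry for a virtual address VA therefore lives at byte offset
 * (VA >> 20) * 4, i.e. VA >> 18, from the table base, which is why
 * the code indexes the table with "address >> 18" expressions.  As
 * an illustration, with the typical PAGE_OFFSET of 0xc0000000 the
 * kernel's first entry sits at offset 0xc0000000 >> 18 = 0x3000.
 */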

/*
 * Set up the initial page tables.  We only set up the bare minimum
 * required to get the kernel running, which generally means mapping
 * in the kernel code.
 *
 * r8 = machinfo
 * r9 = cpuid
 * r10 = procinfo
 *
 * Returns:
 *  r0, r3, r5-r7 corrupted
 *  r4 = physical page table address
 */
__create_page_tables:
	pgtbl	r4				@ page table address

	/*
	 * Clear the 16K level 1 swapper page table
	 */
	mov	r0, r4
	mov	r3, #0
	add	r6, r0, #0x4000
1:	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	teq	r0, r6
	bne	1b

	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

	/*
	 * Create identity mapping to cater for __enable_mmu.
	 * This identity mapping will be removed by paging_init().
	 */
	adr	r0, __enable_mmu_loc
	ldmia	r0, {r3, r5, r6}
	sub	r0, r0, r3			@ virt->phys offset
	add	r5, r5, r0			@ phys __enable_mmu
	add	r6, r6, r0			@ phys __enable_mmu_end
	mov	r5, r5, lsr #20
	mov	r6, r6, lsr #20

1:	orr	r3, r7, r5, lsl #20		@ flags + kernel base
	str	r3, [r4, r5, lsl #2]		@ identity mapping
	teq	r5, r6
	addne	r5, r5, #1			@ next section
	bne	1b

	/*
	 * Now set up the pagetables for our kernel direct
	 * mapped region.
	 */
	mov	r3, pc
	mov	r3, r3, lsr #20
	orr	r3, r7, r3, lsl #20
	add	r0, r4, #(KERNEL_START & 0xff000000) >> 18
	str	r3, [r0, #(KERNEL_START & 0x00f00000) >> 18]!
	ldr	r6, =(KERNEL_END - 1)
	add	r0, r0, #4
	add	r6, r4, r6, lsr #18
1:	cmp	r0, r6
	add	r3, r3, #1 << 20
	strls	r3, [r0], #4
	bls	1b
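
	/*
	 * Illustrative walk-through of the block above, assuming the
	 * typical PAGE_OFFSET of 0xc0000000 and, hypothetically, a
	 * 4MiB image: the str writes the descriptor for the section
	 * containing pc at table offset 0x3000 (the entry for
	 * KERNEL_START), and the loop then fills successive entries
	 * up to and including the one for KERNEL_END - 1 at offset
	 * 0x3010, adding 1MiB (1 << 20) to the physical section base
	 * each time.  The cmp/strls/bls sequence makes the upper
	 * bound inclusive.
	 */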

#ifdef CONFIG_XIP_KERNEL
	/*
	 * Map some RAM to cover our .data and .bss areas.
	 */
	orr	r3, r7, #(KERNEL_RAM_PADDR & 0xff000000)
	.if	(KERNEL_RAM_PADDR & 0x00f00000)
	orr	r3, r3, #(KERNEL_RAM_PADDR & 0x00f00000)
	.endif
	add	r0, r4, #(KERNEL_RAM_VADDR & 0xff000000) >> 18
	str	r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> 18]!
	ldr	r6, =(_end - 1)
	add	r0, r0, #4
	add	r6, r4, r6, lsr #18
1:	cmp	r0, r6
	add	r3, r3, #1 << 20
	strls	r3, [r0], #4
	bls	1b
#endif

	/*
	 * Then map the first 1MB of RAM in case it contains our boot params.
	 */
	add	r0, r4, #PAGE_OFFSET >> 18
	orr	r6, r7, #(PHYS_OFFSET & 0xff000000)
	.if	(PHYS_OFFSET & 0x00f00000)
	orr	r6, r6, #(PHYS_OFFSET & 0x00f00000)
	.endif
	str	r6, [r0]

#ifdef CONFIG_DEBUG_LL
#ifndef CONFIG_DEBUG_ICEDCC
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
	 * via a serial console before paging_init.
	 */
	addruart r7, r3

	mov	r3, r3, lsr #20			@ section index of UART virt address
	mov	r3, r3, lsl #2			@ times 4 = table byte offset

	add	r0, r4, r3
	rsb	r3, r3, #0x4000			@ PTRS_PER_PGD*sizeof(long)
	cmp	r3, #0x0800			@ limit to 512MB
	movhi	r3, #0x0800
	add	r6, r0, r3
	mov	r3, r7, lsr #20
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
	orr	r3, r7, r3, lsl #20
1:	str	r3, [r0], #4
	add	r3, r3, #1 << 20
	teq	r0, r6
	bne	1b

#else /* CONFIG_DEBUG_ICEDCC */
	/* we don't need any serial debugging mappings for ICEDCC */
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
#endif /* !CONFIG_DEBUG_ICEDCC */

#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
	/*
	 * If we're using the NetWinder or CATS, we also need to map
	 * in the 16550-type serial port for the debug messages
	 */
	add	r0, r4, #0xff000000 >> 18
	orr	r3, r7, #0x7c000000
	str	r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
	/*
	 * Map in screen at 0x02000000 & SCREEN2_BASE
	 * Similar reasons here - for debug.  This is
	 * only for Acorn RiscPC architectures.
	 */
	add	r0, r4, #0x02000000 >> 18
	orr	r3, r7, #0x02000000
	str	r3, [r0]
	add	r0, r4, #0xd8000000 >> 18
	str	r3, [r0]
#endif
#endif
	mov	pc, lr
ENDPROC(__create_page_tables)
	.ltorg
__enable_mmu_loc:
	.long	.
	.long	__enable_mmu
	.long	__enable_mmu_end

#if defined(CONFIG_SMP)
	__CPUINIT
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled.  Look up
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type
	movs	r10, r5				@ invalid processor?
	moveq	r0, #'p'			@ yes, error 'p'
	beq	__error_p

	/*
	 * Use the page tables supplied from __cpu_up.
	 */
	adr	r4, __secondary_data
	ldmia	r4, {r5, r7, r12}		@ address to jump to after
	sub	r4, r4, r5			@ mmu has been enabled
	ldr	r4, [r7, r4]			@ get secondary_data.pgdir
	adr	lr, BSYM(__enable_mmu)		@ return address
	mov	r13, r12			@ __secondary_switched address
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	) @ initialise processor
						  @ (return control reg)
 THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
 THUMB(	mov	pc, r12				)
ENDPROC(secondary_startup)

	/*
	 * r7 = &secondary_data
	 */
ENTRY(__secondary_switched)
	ldr	sp, [r7, #4]			@ get secondary_data.stack
	mov	fp, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)

	.type	__secondary_data, %object
__secondary_data:
	.long	.
	.long	secondary_data
	.long	__secondary_switched
#endif /* defined(CONFIG_SMP) */



/*
 * Set up common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
 * registers.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags pointer
 *  r4  = page table pointer
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 */
__enable_mmu:
#ifdef CONFIG_ALIGNMENT_TRAP
	orr	r0, r0, #CR_A
#else
	bic	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
	b	__turn_mmu_on
ENDPROC(__enable_mmu)

/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags pointer
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
	.align	5
__turn_mmu_on:
	mov	r0, r0				@ nop
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
	mov	r3, r3				@ nop
	mov	r3, r13
	mov	pc, r3
__enable_mmu_end:
ENDPROC(__turn_mmu_on)

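/*
 * How the fixup below works: each ALT_SMP()/ALT_UP() pair elsewhere
 * in the kernel emits the SMP-safe instruction in line and records,
 * between __smpalt_begin and __smpalt_end, the address of that
 * instruction together with its UP replacement (see the ALT_SMP and
 * ALT_UP macros in asm/assembler.h).  __fixup_smp_on_up walks those
 * (address, replacement) pairs and patches the image in place.  The
 * ".word ." anchor at 1f below holds the link-time address of itself;
 * subtracting it from the run-time adr result gives the virt->phys
 * delta in r3, which turns each recorded virtual address into a
 * physical one we can safely write to with the MMU still off.
 */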
#ifdef CONFIG_SMP_ON_UP
__fixup_smp:
	mov	r7, #0x00070000
	orr	r6, r7, #0xff000000	@ mask 0xff070000
	orr	r7, r7, #0x41000000	@ val 0x41070000
	and	r0, r9, r6
	teq	r0, r7			@ ARM CPU and ARMv6/v7?
	bne	__fixup_smp_on_up	@ no, assume UP

	orr	r6, r6, #0x0000ff00
	orr	r6, r6, #0x000000f0	@ mask 0xff07fff0
	orr	r7, r7, #0x0000b000
	orr	r7, r7, #0x00000020	@ val 0x4107b020
	and	r0, r9, r6
	teq	r0, r7			@ ARM 11MPCore?
	moveq	pc, lr			@ yes, assume SMP

	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
	tst	r0, #1 << 31
	movne	pc, lr			@ bit 31 => SMP

__fixup_smp_on_up:
	adr	r0, 1f
	ldmia	r0, {r3, r6, r7}
	sub	r3, r0, r3
	add	r6, r6, r3
	add	r7, r7, r3
2:	cmp	r6, r7
	ldmia	r6!, {r0, r4}
	strlo	r4, [r0, r3]
	blo	2b
	mov	pc, lr
ENDPROC(__fixup_smp)

1:	.word	.
	.word	__smpalt_begin
	.word	__smpalt_end

	.pushsection .data
	.globl	smp_on_up
smp_on_up:
	ALT_SMP(.long	1)
	ALT_UP(.long	0)
	.popsection

#endif

#include "head-common.S"