1/* $NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $ */ 2 3/*- 4 * Copyright 2011 Semihalf 5 * Copyright (C) 1994-1997 Mark Brinicombe 6 * Copyright (C) 1994 Brini 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by Brini. 20 * 4. The name of Brini may not be used to endorse or promote products 21 * derived from this software without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 26 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 29 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 31 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 32 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "assym.s"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/pte.h>

__FBSDID("$FreeBSD$");

/* What size should this really be ? It is only used by initarm() */
#define	INIT_ARM_STACK_SIZE	(2048 * 4)

/*
 * CPWAIT: classic "drain the CP15 write" idiom.  An arbitrary CP15 read,
 * a dependent mov to stall on it, then a branch-to-next-insn to flush
 * the pipeline after a system-control register change.
 */
#define CPWAIT_BRANCH							 \
	sub	pc, pc, #4

#define CPWAIT(tmp)							 \
	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
	mov	tmp, tmp		/* wait for it to complete */	;\
	CPWAIT_BRANCH			/* branch to next insn */

/*
 * This is for kvm_mkdb, and should be the address of the beginning
 * of the kernel text segment (not necessarily the same as kernbase).
 */
	.text
	.align	0
.globl kernbase
.set kernbase,KERNBASE
.globl physaddr
.set physaddr,PHYSADDR

/*
 * On entry for FreeBSD boot ABI:
 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg2 to initarm)
 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
 *
 * For both types of boot we gather up the args, put them in a struct arm_boot_params
 * structure and pass that to initarm.
 */
ENTRY_NP(btext)
ASENTRY_NP(_start)
	/* Stash the four boot-loader registers in regs that survive the
	 * calls below; they are written into arm_boot_params at virt_done. */
	mov	r9, r0		/* 0 or boot mode from boot2 */
	mov	r8, r1		/* Save Machine type */
	mov	ip, r2		/* Save meta data */
	mov	fp, r3		/* Future expansion */

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(I32_bit|F32_bit)
	msr	cpsr_c, r7

#if defined (FLASHADDR) && defined(LOADERRAMADDR)
	/* Check if we're running from flash. */
	ldr	r7, =FLASHADDR
	/*
	 * If we're running with MMU disabled, test against the
	 * physical address instead.
	 */
	mrc	p15, 0, r2, c1, c0, 0
	ands	r2, r2, #CPU_CONTROL_MMU_ENABLE
	ldreq	r6, =PHYSADDR
	ldrne	r6, =LOADERRAMADDR
	cmp	r7, r6
	bls	flash_lower
	cmp	r7, pc
	bhi	from_ram
	b	do_copy

flash_lower:
	cmp	r6, pc
	bls	from_ram
do_copy:
	/* Copy the kernel image from flash into RAM, then jump to the
	 * copied 'from_ram' via its RAM offset.  r4 holds the RAM base
	 * of the copy across the memcpy call. */
	ldr	r7, =KERNBASE
	adr	r1, _start
	ldr	r0, Lreal_start
	ldr	r2, Lend
	sub	r2, r2, r0	/* r2 = image size in bytes */
	sub	r0, r0, r7	/* link-address offset of _start ... */
	add	r0, r0, r6	/* ... relocated to the RAM base */
	mov	r4, r0
	bl	memcpy
	ldr	r0, Lram_offset
	add	pc, r4, r0	/* jump into the RAM copy at from_ram */
Lram_offset:	.word from_ram-_C_LABEL(_start)
from_ram:
	nop
#endif
	/* Compute the physical address of Lunmapped: strip the KVA high
	 * nibble and substitute the physical load address. */
	adr	r7, Lunmapped
	bic	r7, r7, #0xf0000000
	orr	r7, r7, #PHYSADDR


disable_mmu:
	/* Disable MMU for a while */
	mrc	p15, 0, r2, c1, c0, 0
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	p15, 0, r2, c1, c0, 0

	/* nops flush the pipeline before jumping to physical addresses */
	nop
	nop
	nop
	mov	pc, r7
Lunmapped:
#ifdef STARTUP_PAGETABLE_ADDR
	/* build page table from scratch */
	ldr	r0, Lstartup_pagetable
	adr	r4, mmu_init_table
	b	3f

2:
	/* Fill r1 consecutive L1 section entries starting at table
	 * offset r2, descriptor value r3, stepping one section each. */
	str	r3, [r0, r2]
	add	r2, r2, #4
	add	r3, r3, #(L1_S_SIZE)
	adds	r1, r1, #-1
	bhi	2b
3:
	ldmia	r4!, {r1,r2,r3}   /* # of sections, VA, PA|attr */
	cmp	r1, #0		/* a zero count terminates the table */
	/* Loop label 2b must be reached at its PHYSICAL address since the
	 * MMU is still off; rewrite it the same way as Lunmapped above. */
	adrne	r5, 2b
	bicne	r5, r5, #0xf0000000
	orrne	r5, r5, #PHYSADDR
	movne	pc, r5

#if defined(SMP)
	orr 	r0, r0, #2		/* Set TTB shared memory flag */
#endif
	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B)
	mov	r0, #0
	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
#endif

	/* Set the Domain Access register.  Very important! */
	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
	/* Enable MMU */
	mrc	p15, 0, r0, c1, c0, 0
#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B)
	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
#endif
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

#endif
mmu_done:
	nop
	/* Zero the BSS (_edata.._end) and load the initial SVC stack. */
	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	sub	r2, r2, r1		/* get zero init data */
	mov	r3, #0
.L1:
	str	r3, [r1], #0x0004	/* get zero init data */
	subs	r2, r2, #4
	bgt	.L1
	/* Indirect jump switches the PC from physical to virtual. */
	ldr	pc, .Lvirt_done

virt_done:
	/* Build a struct arm_boot_params on the stack from the registers
	 * saved at entry and hand it to initarm(). */
	mov	r1, #20		/* loader info size is 20 bytes also second arg */
	subs	sp, sp, r1	/* allocate arm_boot_params struct on stack */
	bic	sp, sp, #7	/* align stack to 8 bytes */
	mov	r0, sp		/* loader info pointer is first arg */
	str	r1, [r0]	/* Store length of loader info */
	str	r9, [r0, #4]	/* Store r0 from boot loader */
	str	r8, [r0, #8]	/* Store r1 from boot loader */
	str	ip, [r0, #12]	/* store r2 from boot loader */
	str	fp, [r0, #16]	/* store r3 from boot loader */
	mov	fp, #0		/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */

	/* init arm will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)	/* call mi_startup()! */

	adr	r0, .Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
#ifdef STARTUP_PAGETABLE_ADDR
/*
 * Each mmu_init_table entry is three words: section count, the L1 table
 * byte offset derived from the VA, and the PA merged with the attributes.
 */
#define MMU_INIT(va,pa,n_sec,attr) \
	.word	n_sec ; \
	.word	4*((va)>>L1_S_SHIFT) ; \
	.word	(pa)|(attr) ;

Lvirtaddr:
	.word	KERNVIRTADDR
Lphysaddr:
	.word	KERNPHYSADDR
Lreal_start:
	.word	_start
Lend:
	.word	_edata
Lstartup_pagetable:
	.word	STARTUP_PAGETABLE_ADDR
#ifdef SMP
Lstartup_pagetable_secondary:
	.word	temp_pagetable
#endif
END(btext)
END(_start)

mmu_init_table:
	/* fill all table VA==PA */
	/* map SDRAM VA==PA, WT cacheable */
#if !defined(SMP)
	MMU_INIT(PHYSADDR, PHYSADDR , 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
	/* map VA 0xc0000000..0xc3ffffff to PA */
	MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
#else
	MMU_INIT(PHYSADDR, PHYSADDR , 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
	/* map VA 0xc0000000..0xc3ffffff to PA */
	MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
	MMU_INIT(0x48000000, 0x48000000, 1, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
#endif
	.word 0	/* end of table */
#endif
.Lstart:
	.word	_edata		/* BSS start */
	.word	_end		/* BSS end */
	.word	svcstk + INIT_ARM_STACK_SIZE	/* top of initial stack */

.Lvirt_done:
	.word	virt_done
#if defined(SMP)
.Lmpvirt_done:
	.word	mpvirt_done
#endif

.Lmainreturned:
	.asciz	"main() returned"
	.align	0

	.bss
svcstk:
	.space	INIT_ARM_STACK_SIZE

	.text
	.align	0

.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

#if defined(SMP)
Lsramaddr:
	.word	0xffff0080

/*
 * AP_DEBUG: when enabled (#if 1), scribble a progress marker into SRAM
 * at Lsramaddr + 4*cpuid so early secondary-CPU bringup can be traced.
 * Compiled out by default.
 */
#if 0
#define	AP_DEBUG(tmp) \
	mrc	p15, 0, r1, c0, c0, 5; \
	ldr	r0, Lsramaddr; \
	add	r0, r1, lsl #2; \
	mov	r1, tmp; \
	str	r1, [r0], #0x0000;
#else
#define AP_DEBUG(tmp)
#endif


/*
 * mptramp: first code run by a secondary CPU.  Invalidates caches,
 * forces SVC mode, reads its CPU ID from MPIDR, and jumps to the boot
 * address the BSP stored for it in the per-CPU PMU register bank.
 */
ASENTRY_NP(mptramp)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0	/* invalidate I+D caches */

	AP_DEBUG(#1)

	mrs	r3, cpsr_all
	bic	r3, r3, #(PSR_MODE)
	orr	r3, r3, #(PSR_SVC32_MODE)
	msr	cpsr_all, r3

	mrc	p15, 0, r0, c0, c0, 5
	and	r0, #0x0f		/* Get CPU ID */

	/* Read boot address for CPU */
	mov	r1, #0x100		/* 0x100 bytes of register space per CPU */
	mul	r2, r0, r1
	ldr	r1, Lpmureg
	add	r0, r2, r1
	ldr	r1, [r0], #0x00

	mov	pc, r1

Lpmureg:
	.word	0xd0022124	/* per-CPU boot-address register base */
END(mptramp)

/*
 * mpentry: secondary-CPU entry proper.  Mirrors the BSP path above:
 * disable interrupts and the MMU, point TTB at the temp pagetable the
 * BSP built, enable the MMU, carve a per-CPU stack below the BSP's
 * initial stack, then call init_secondary().
 */
ASENTRY_NP(mpentry)

	AP_DEBUG(#2)

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(I32_bit|F32_bit)
	msr	cpsr_c, r7


	/* Physical address of Ltag, same rewrite as Lunmapped above. */
	adr	r7, Ltag
	bic	r7, r7, #0xf0000000
	orr	r7, r7, #PHYSADDR

	/* Disable MMU for a while */
	mrc	p15, 0, r2, c1, c0, 0
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	p15, 0, r2, c1, c0, 0

	nop
	nop
	nop

	AP_DEBUG(#3)

Ltag:
	ldr	r0, Lstartup_pagetable_secondary
	bic	r0, r0, #0xf0000000
	orr	r0, r0, #PHYSADDR
	ldr	r0, [r0]	/* temp_pagetable holds the table's address */
#if defined(SMP)
	/* NOTE(review): orr with #0 is a no-op; the primary-CPU path sets
	 * the TTB shared bit with #2 — confirm whether it belongs here too. */
	orr	r0, r0, #0	/* Set TTB shared memory flag */
#endif
	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA)
	mov	r0, #0
	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
#endif

	AP_DEBUG(#4)

	/* Set the Domain Access register.  Very important! */
	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
	/* Enable MMU */
	mrc	p15, 0, r0, c1, c0, 0
#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA)
	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
#endif
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

	/* Per-CPU stack: 2048 bytes below the BSP stack top per CPU ID. */
	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	mrc	p15, 0, r0, c0, c0, 5
	and	r0, r0, #15		/* CPU ID from MPIDR */
	mov	r1, #2048
	mul	r2, r1, r0
	sub	sp, sp, r2
	str	r1, [sp]
	ldr	pc, .Lmpvirt_done

mpvirt_done:

	mov	fp, #0		/* trace back starts here */
	bl	_C_LABEL(init_secondary)	/* Off we go */

	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */

.Lmpreturned:
	/* NOTE(review): message says "main()" but this path returns from
	 * init_secondary() — consider correcting the text. */
	.asciz	"main() returned"
	.align	0
END(mpentry)
#endif

/*
 * cpu_halt: write back and invalidate the caches, optionally disable
 * the MMU (ARMv4+), and jump to the board-supplied cpu_reset_address
 * to complete the reset.  Never returns.
 */
ENTRY_NP(cpu_halt)
	mrs	r2, cpsr
	bic	r2, r2, #(PSR_MODE)
	orr	r2, r2, #(PSR_SVC32_MODE)
	orr	r2, r2, #(I32_bit | F32_bit)
	msr	cpsr_all, r2

	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]

	/* Indirect calls through the cpufuncs switch table. */
	ldr	r0, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]
	mov	lr, pc
	ldr	pc, [r0, #CF_L2CACHE_WBINV_ALL]

	/*
	 * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's
	 * necessary.
	 */

	ldr	r1, .Lcpu_reset_needs_v4_MMU_disable
	ldr	r1, [r1]
	cmp	r1, #0		/* condition consumed by mcrne below */
	mov	r2, #0

	/*
	 * MMU & IDC off, 32 bit program & data space
	 * Hurl ourselves into the ROM
	 */
	mov	r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
	mcr	15, 0, r0, c1, c0, 0
	mcrne	15, 0, r2, c8, c7, 0	/* nail I+D TLB on ARMv4 and greater */
	mov	pc, r4

	/*
	 * _cpu_reset_address contains the address to branch to, to complete
	 * the cpu reset after turning the MMU off
	 * This variable is provided by the hardware specific code
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)

	/*
	 * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
	 * v4 MMU disable instruction needs executing... it is an illegal instruction
	 * on f.e. ARM6/7 that locks up the computer in an endless illegal
	 * instruction / data-abort / reset loop.
	 */
.Lcpu_reset_needs_v4_MMU_disable:
	.word	_C_LABEL(cpu_reset_needs_v4_MMU_disable)
END(cpu_halt)


/*
 * setjump + longjmp
 */
ENTRY(setjmp)
	/* Save r4-r14 (callee-saved regs, sp, lr) into the jmp_buf at r0. */
	stmia	r0, {r4-r14}
	mov	r0, #0x00000000
	RET
END(setjmp)

ENTRY(longjmp)
	/* Restore r4-r14 from the jmp_buf; return 1 at the setjmp site. */
	ldmia	r0, {r4-r14}
	mov	r0, #0x00000001
	RET
END(longjmp)

	.data
	.global	_C_LABEL(esym)
_C_LABEL(esym):	.word	_C_LABEL(end)

ENTRY_NP(abort)
	b	_C_LABEL(abort)	/* deliberate infinite self-branch */
END(abort)

/*
 * sigcode: signal trampoline copied onto the user stack.  Size matters:
 * szsigcode below is esigcode-sigcode, so the literal words must stay
 * exactly where the PC-relative loads expect them.
 */
ENTRY_NP(sigcode)
	mov	r0, sp		/* r0 = sigframe on the user stack */

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */

	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well if that failed we better exit quick ! */

	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn */
	b	. - 16

	.word	SYS_sigreturn
	.word	SYS_exit

	.align	0
	.global	_C_LABEL(esigcode)
	_C_LABEL(esigcode):

	.data
	.global	szsigcode
szsigcode:
	.long	esigcode-sigcode
END(sigcode)
/* End of locore.S */