/* $NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $ */

/*-
 * Copyright 2011 Semihalf
 * Copyright (C) 1994-1997 Mark Brinicombe
 * Copyright (C) 1994 Brini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of Brini may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "assym.s"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/pte.h>

__FBSDID("$FreeBSD$");

/* What size should this really be? It is only used by initarm(). */
#define	INIT_ARM_STACK_SIZE	(2048 * 4)

#define	CPWAIT_BRANCH							 \
	sub	pc, pc, #4

#define	CPWAIT(tmp)							 \
	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
	mov	tmp, tmp		/* wait for it to complete */	;\
	CPWAIT_BRANCH			/* branch to next insn */

/*
 * This is for kvm_mkdb, and should be the address of the beginning
 * of the kernel text segment (not necessarily the same as kernbase).
 */
	.text
	.align	0
.globl kernbase
.set kernbase,KERNBASE
.globl physaddr
.set physaddr,PHYSADDR

/*
 * On entry for FreeBSD boot ABI:
 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg2 to initarm)
 *	r2 - pointer to a tagged list or dtb image (phys addr) (passed as arg1 to initarm)
 *
 * For both boot ABIs we gather up the arguments, put them in a
 * struct arm_boot_params and pass that to initarm.
 */
ENTRY_NP(btext)
ASENTRY_NP(_start)
	mov	r9, r0		/* 0 or boot mode from boot2 */
	mov	r8, r1		/* Save machine type */
	mov	ip, r2		/* Save metadata pointer */
	mov	fp, r3		/* Future expansion */

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(I32_bit|F32_bit)
	msr	cpsr_c, r7

#if defined (FLASHADDR) && defined(LOADERRAMADDR)
	/* Check if we're running from flash. */
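	/*
	 * In outline: compare pc against the flash and RAM windows (using
	 * the physical RAM base if the MMU is still off) to decide whether
	 * the image is already executing from RAM.  If it is not, memcpy
	 * the kernel from its current location into RAM at its load offset
	 * and jump to the relocated copy at from_ram.
	 */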
	ldr	r7, =FLASHADDR
	/*
	 * If we're running with MMU disabled, test against the
	 * physical address instead.
	 */
	mrc	p15, 0, r2, c1, c0, 0
	ands	r2, r2, #CPU_CONTROL_MMU_ENABLE
	ldreq	r6, =PHYSADDR
	ldrne	r6, =LOADERRAMADDR
	cmp	r7, r6
	bls	flash_lower
	cmp	r7, pc
	bhi	from_ram
	b	do_copy

flash_lower:
	cmp	r6, pc
	bls	from_ram
do_copy:
	ldr	r7, =KERNBASE
	adr	r1, _start
	ldr	r0, Lreal_start
	ldr	r2, Lend
	sub	r2, r2, r0
	sub	r0, r0, r7
	add	r0, r0, r6
	mov	r4, r0
	bl	memcpy
	ldr	r0, Lram_offset
	add	pc, r4, r0
Lram_offset:	.word from_ram-_C_LABEL(_start)
from_ram:
	nop
#endif
	adr	r7, Lunmapped
	bic	r7, r7, #0xf0000000
	orr	r7, r7, #PHYSADDR

disable_mmu:
	/* Disable the MMU for a while. */
	mrc	p15, 0, r2, c1, c0, 0
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	p15, 0, r2, c1, c0, 0

	nop
	nop
	nop
	mov	pc, r7
Lunmapped:
#ifdef STARTUP_PAGETABLE_ADDR
	/* Build the startup page table from scratch. */
	ldr	r0, Lstartup_pagetable
	adr	r4, mmu_init_table
	b	3f

2:
	str	r3, [r0, r2]
	add	r2, r2, #4
	add	r3, r3, #(L1_S_SIZE)
	adds	r1, r1, #-1
	bhi	2b
3:
	ldmia	r4!, {r1, r2, r3}	/* # of sections, L1 table offset, PA|attr */
	cmp	r1, #0
	adrne	r5, 2b
	bicne	r5, r5, #0xf0000000
	orrne	r5, r5, #PHYSADDR
	movne	pc, r5

#if defined(SMP)
	orr	r0, r0, #2		/* Set TTB shared memory flag */
#endif
	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B)
	mov	r0, #0
	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
#endif

	/* Set the Domain Access register.  Very important! */
	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
	/*
	 * Enable MMU.
	 * On armv6 enable extended page tables, and set alignment checking
	 * to modulo-4 (CPU_CONTROL_UNAL_ENABLE) for the ldrd/strd
	 * instructions emitted by clang.
	 */
	mrc	p15, 0, r0, c1, c0, 0
#ifdef _ARM_ARCH_6
	orr	r0, r0, #(CPU_CONTROL_V6_EXTPAGE | CPU_CONTROL_UNAL_ENABLE)
	orr	r2, r2, #(CPU_CONTROL_AFLT_ENABLE)
#endif
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

#endif
mmu_done:
	nop
	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	sub	r2, r2, r1		/* get zero init data */
	mov	r3, #0
.L1:
	str	r3, [r1], #0x0004	/* zero the bss */
	subs	r2, r2, #4
	bgt	.L1
	ldr	pc, .Lvirt_done

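	/*
	 * virt_done builds a struct arm_boot_params on the stack and passes
	 * its address (and its size, 20 bytes) to initarm().  A sketch of
	 * the layout implied by the five word-sized stores below; the field
	 * names are illustrative and the authoritative definition lives in
	 * the machine-dependent headers:
	 *
	 *	struct arm_boot_params {
	 *		register_t	abp_size;	size of this structure
	 *		register_t	abp_r0;		r0 from the boot loader
	 *		register_t	abp_r1;		r1 from the boot loader
	 *		register_t	abp_r2;		r2 from the boot loader
	 *		register_t	abp_r3;		r3 from the boot loader
	 *	};
	 */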
virt_done:
	mov	r1, #20			/* loader info size is 20 bytes, also the second arg */
	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
	bic	sp, sp, #7		/* align stack to 8 bytes */
	mov	r0, sp			/* loader info pointer is first arg */
	str	r1, [r0]		/* Store length of loader info */
	str	r9, [r0, #4]		/* Store r0 from boot loader */
	str	r8, [r0, #8]		/* Store r1 from boot loader */
	str	ip, [r0, #12]		/* Store r2 from boot loader */
	str	fp, [r0, #16]		/* Store r3 from boot loader */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */

	/* initarm will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)	/* call mi_startup()! */

	adr	r0, .Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
#ifdef STARTUP_PAGETABLE_ADDR
#define	MMU_INIT(va,pa,n_sec,attr) \
	.word	n_sec					; \
	.word	4*((va)>>L1_S_SHIFT)			; \
	.word	(pa)|(attr)				;

Lvirtaddr:
	.word	KERNVIRTADDR
Lphysaddr:
	.word	KERNPHYSADDR
Lreal_start:
	.word	_start
Lend:
	.word	_edata
Lstartup_pagetable:
	.word	STARTUP_PAGETABLE_ADDR
#ifdef SMP
Lstartup_pagetable_secondary:
	.word	temp_pagetable
#endif
END(btext)
END(_start)

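/*
 * mmu_init_table is consumed by the loop at Lunmapped above.  Each
 * MMU_INIT(va, pa, n_sec, attr) entry expands to three words: a section
 * count, a byte offset into the L1 table (one 4-byte descriptor per 1MB
 * section), and the first section descriptor value (pa | attr).  The loop
 * stores n_sec consecutive section descriptors, advancing the physical
 * address by L1_S_SIZE (1MB) for each, and stops when it reads a zero
 * count.  For example, MMU_INIT(KERNBASE, PHYSADDR, 64, ...) maps the
 * 64MB of VA starting at KERNBASE onto physical memory at PHYSADDR.
 */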
mmu_init_table:
	/* fill all table VA==PA */
	/* map SDRAM VA==PA, WT cacheable */
#if !defined(SMP)
	MMU_INIT(PHYSADDR, PHYSADDR, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
	/* map VA 0xc0000000..0xc3ffffff to PA */
	MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
#else
	MMU_INIT(PHYSADDR, PHYSADDR, 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
	/* map VA 0xc0000000..0xc3ffffff to PA */
	MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
	MMU_INIT(0x48000000, 0x48000000, 1, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
#endif
	.word 0	/* end of table */
#endif
.Lstart:
	.word	_edata
	.word	_end
	.word	svcstk + INIT_ARM_STACK_SIZE

.Lvirt_done:
	.word	virt_done
#if defined(SMP)
.Lmpvirt_done:
	.word	mpvirt_done
#endif

.Lmainreturned:
	.asciz	"main() returned"
	.align	0

	.bss
svcstk:
	.space	INIT_ARM_STACK_SIZE

	.text
	.align	0

.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

#if defined(SMP)
Lsramaddr:
	.word	0xffff0080

#if 0
#define	AP_DEBUG(tmp)			\
	mrc	p15, 0, r1, c0, c0, 5;	\
	ldr	r0, Lsramaddr;		\
	add	r0, r1, lsl #2;		\
	mov	r1, tmp;		\
	str	r1, [r0], #0x0000;
#else
#define	AP_DEBUG(tmp)
#endif

ASENTRY_NP(mptramp)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0

	AP_DEBUG(#1)

	mrs	r3, cpsr_all
	bic	r3, r3, #(PSR_MODE)
	orr	r3, r3, #(PSR_SVC32_MODE)
	msr	cpsr_all, r3

	mrc	p15, 0, r0, c0, c0, 5
	and	r0, #0x0f		/* Get CPU ID */

	/* Read boot address for CPU */
	mov	r1, #0x100
	mul	r2, r0, r1
	ldr	r1, Lpmureg
	add	r0, r2, r1
	ldr	r1, [r0], #0x00

	mov	pc, r1

Lpmureg:
	.word	0xd0022124
END(mptramp)

ASENTRY_NP(mpentry)

	AP_DEBUG(#2)

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(I32_bit|F32_bit)
	msr	cpsr_c, r7

	adr	r7, Ltag
	bic	r7, r7, #0xf0000000
	orr	r7, r7, #PHYSADDR

	/* Disable the MMU for a while. */
	mrc	p15, 0, r2, c1, c0, 0
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	p15, 0, r2, c1, c0, 0

	nop
	nop
	nop

	AP_DEBUG(#3)

Ltag:
	ldr	r0, Lstartup_pagetable_secondary
	bic	r0, r0, #0xf0000000
	orr	r0, r0, #PHYSADDR
	ldr	r0, [r0]
#if defined(SMP)
	orr	r0, r0, #0	/* Set TTB shared memory flag */
#endif
	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA)
	mov	r0, #0
	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
#endif

	AP_DEBUG(#4)

	/* Set the Domain Access register.  Very important! */
	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
	/* Enable MMU */
	mrc	p15, 0, r0, c1, c0, 0
#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA)
	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
#endif
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	mrc	p15, 0, r0, c0, c0, 5
	and	r0, r0, #15
	mov	r1, #2048
	mul	r2, r1, r0
	sub	sp, sp, r2		/* carve a per-CPU stack below svcstk top */
	str	r1, [sp]
	ldr	pc, .Lmpvirt_done

mpvirt_done:

	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(init_secondary)	/* Off we go */

	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */

.Lmpreturned:
	.asciz	"init_secondary() returned"
	.align	0
END(mpentry)
#endif

ENTRY_NP(cpu_halt)
	mrs	r2, cpsr
	bic	r2, r2, #(PSR_MODE)
	orr	r2, r2, #(PSR_SVC32_MODE)
	orr	r2, r2, #(I32_bit | F32_bit)
	msr	cpsr_all, r2

	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]

	ldr	r0, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]
	mov	lr, pc
	ldr	pc, [r0, #CF_L2CACHE_WBINV_ALL]

	/*
	 * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's
	 * necessary.
	 */
	ldr	r1, .Lcpu_reset_needs_v4_MMU_disable
	ldr	r1, [r1]
	cmp	r1, #0
	mov	r2, #0

	/*
	 * MMU & IDC off, 32 bit program & data space
	 * Hurl ourselves into the ROM
	 */
	mov	r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	mcrne	p15, 0, r2, c8, c7, 0	/* nail I+D TLB on ARMv4 and greater */
	mov	pc, r4

	/*
	 * _cpu_reset_address contains the address to branch to, to complete
	 * the cpu reset after turning the MMU off.
	 * This variable is provided by the hardware specific code.
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)

	/*
	 * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
	 * v4 MMU disable instruction needs executing... it is an illegal
	 * instruction on e.g. ARM6/7 that locks up the computer in an endless
	 * illegal instruction / data-abort / reset loop.
	 */
.Lcpu_reset_needs_v4_MMU_disable:
	.word	_C_LABEL(cpu_reset_needs_v4_MMU_disable)
END(cpu_halt)


/*
 * setjmp + longjmp
 */
ENTRY(setjmp)
	stmia	r0, {r4-r14}
	mov	r0, #0x00000000
	RET
END(setjmp)

ENTRY(longjmp)
	ldmia	r0, {r4-r14}
	mov	r0, #0x00000001
	RET
END(longjmp)

	.data
	.global	_C_LABEL(esym)
_C_LABEL(esym):	.word	_C_LABEL(end)

ENTRY_NP(abort)
	b	_C_LABEL(abort)
END(abort)

ENTRY_NP(sigcode)
	mov	r0, sp

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */

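	/*
	 * A note on the PC-relative offsets below: ARM's pc reads as the
	 * address of the current instruction plus 8, so "ldr r7, [pc, #12]"
	 * loads the .word SYS_sigreturn literal 20 bytes below it and
	 * "ldr r7, [pc, #8]" loads the .word SYS_exit literal 24 bytes
	 * below it, while "b . - 16" branches back to the first ldr.
	 */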
	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well if that failed we better exit quick! */
	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn */
	b	. - 16

	.word	SYS_sigreturn
	.word	SYS_exit

	.align	0
	.global _C_LABEL(esigcode)
		_C_LABEL(esigcode):

	.data
	.global	szsigcode
szsigcode:
	.long	esigcode-sigcode
END(sigcode)
/* End of locore.S */