1/*- 2 * Copyright 2004-2014 Olivier Houchard <cognet@FreeBSD.org> 3 * Copyright 2012-2014 Ian Lepore <ian@FreeBSD.org> 4 * Copyright 2013-2014 Andrew Turner <andrew@FreeBSD.org> 5 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com> 6 * Copyright 2014 Michal Meloun <meloun@miracle.cz> 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31#include "assym.inc" 32#include <sys/syscall.h> 33#include <machine/asm.h> 34#include <machine/asmacros.h> 35#include <machine/armreg.h> 36#include <machine/sysreg.h> 37#include <machine/pte.h> 38/* We map 64MB of kernel unless overridden in assym.inc by the kernel option. 
 */
#ifndef LOCORE_MAP_MB
#define LOCORE_MAP_MB 64
#endif

#if __ARM_ARCH >= 7
#if defined(__ARM_ARCH_7VE__) || defined(__clang__)
/*
 * HYP support is in binutils >= 2.21 and gcc >= 4.9 defines __ARM_ARCH_7VE__
 * when enabled. llvm >= 3.6 supports it too.
 */
.arch_extension virt
#endif
#endif /* __ARM_ARCH >= 7 */

/* A small statically-allocated stack used only during initarm() and AP startup. */
#define INIT_ARM_STACK_SIZE 2048

	.text
	.align	2

	.globl kernbase
	.set kernbase,KERNVIRTADDR

#if __ARM_ARCH >= 7
/*
 * If the CPU was entered in HYP (virtualization) mode, install the
 * hypervisor stub vectors, record the fact in hypmode_enabled (0 = was in
 * HYP, -1 = was not), and drop to SVC mode with IRQ/FIQ/Aborts masked.
 * Falls straight through (label 2:) when already in a non-HYP mode.
 * Clobbers: r0, r1, lr.
 */
#define HANDLE_HYP							\
	/* Leave HYP mode */						;\
	mrs	r0, cpsr						;\
	and	r0, r0, #(PSR_MODE)	/* Mode is in the low 5 bits of CPSR */ ;\
	teq	r0, #(PSR_HYP32_MODE)	/* Hyp Mode? */			;\
	bne	1f							;\
	/* Install Hypervisor Stub Exception Vector */			;\
	bl	hypervisor_stub_vect_install				;\
	mov	r0, 0							;\
	adr	r1, hypmode_enabled					;\
	str	r0, [r1]						;\
	/* Ensure that IRQ, FIQ and Aborts will be disabled after eret */ ;\
	mrs	r0, cpsr						;\
	bic	r0, r0, #(PSR_MODE)					;\
	orr	r0, r0, #(PSR_SVC32_MODE)				;\
	orr	r0, r0, #(PSR_I | PSR_F | PSR_A)			;\
	msr	spsr_cxsf, r0						;\
	/* Exit hypervisor mode */					;\
	adr	lr, 2f							;\
	MSR_ELR_HYP(14)							;\
	ERET								;\
1:									;\
	mov	r0, -1							;\
	adr	r1, hypmode_enabled					;\
	str	r0, [r1]						;\
2:
#else
#define HANDLE_HYP
#endif /* __ARM_ARCH >= 7 */

/*
 * On entry for FreeBSD boot ABI:
 *	r0 - metadata pointer or 0
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg 2 to initarm)
 *	r2 - Pointer to a tagged list or dtb image (phys addr)
 *	     (passed as arg 1 to initarm)
 *
 * For both types of boot we gather up the args, put them in a
 * struct arm_boot_params structure and pass that to initarm.
 */
	.globl	btext
btext:
ASENTRY_NP(_start)
	STOP_UNWINDING		/* Can't unwind into the bootloader! */

	/* Make sure interrupts are disabled. */
	cpsid	ifa

	/*
	 * Stash the bootloader-provided registers in callee-saved regs;
	 * they are stored into the arm_boot_params struct further below.
	 */
	mov	r8, r0		/* 0 or boot mode from boot2 */
	mov	r9, r1		/* Save Machine type */
	mov	r10, r2		/* Save meta data */
	mov	r11, r3		/* Future expansion */

	/* If HYP-MODE is active, install an exception vector stub. */
	HANDLE_HYP

	/*
	 * Check whether data cache is enabled.  If it is, then we know
	 * current tags are valid (not power-on garbage values) and there
	 * might be dirty lines that need cleaning.  Disable cache to prevent
	 * new lines being allocated, then call wbinv_poc_all to clean it.
	 */
	mrc	CP15_SCTLR(r7)
	tst	r7, #CPU_CONTROL_DC_ENABLE
	blne	dcache_wbinv_poc_all

	/* ! Do not write to memory between wbinv and disabling cache ! */

	/*
	 * Now there are no dirty lines, but there may still be lines marked
	 * valid.  Disable all caches and the MMU, and invalidate everything
	 * before setting up new page tables and re-enabling the mmu.
	 */
1:
	bic	r7, #CPU_CONTROL_DC_ENABLE
	bic	r7, #CPU_CONTROL_AFLT_ENABLE
	bic	r7, #CPU_CONTROL_MMU_ENABLE
	bic	r7, #CPU_CONTROL_IC_ENABLE
	bic	r7, #CPU_CONTROL_BPRD_ENABLE
	bic	r7, #CPU_CONTROL_SW_ENABLE
	orr	r7, #CPU_CONTROL_UNAL_ENABLE
	orr	r7, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r7)
	DSB
	ISB
	bl	dcache_inv_poc_all
	mcr	CP15_ICIALLU
	DSB
	ISB

	/*
	 * Build page table from scratch.
	 */

	/*
	 * Figure out the physical address we're loaded at by assuming this
	 * entry point code is in the first L1 section and so if we clear the
	 * offset bits of the pc that will give us the section-aligned load
	 * address, which remains in r5 throughout all the following code.
	 */
	ldr	r2, =(L1_S_OFFSET)
	bic	r5, pc, r2

	/* Find the delta between VA and PA, result stays in r0 throughout. */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	/*
	 * First map the entire 4GB address space as VA=PA.  It's mapped as
	 * normal (cached) memory because it's for things like accessing the
	 * parameters passed in from the bootloader, which might be at any
	 * physical address, different for every platform.
	 */
	mov	r1, #0
	mov	r2, #0
	mov	r3, #4096
	bl	build_pagetables

	/*
	 * Next we map the kernel starting at the physical load address,
	 * mapped to the VA the kernel is linked for.  The default size
	 * we map is 64MiB but it can be overridden with a kernel option.
	 */
	mov	r1, r5
	ldr	r2, =(KERNVIRTADDR)
	ldr	r3, =(LOCORE_MAP_MB)
	bl	build_pagetables

	/* Create a device mapping for early_printf if specified. */
#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	ldr	r1, =SOCDEV_PA
	ldr	r2, =SOCDEV_VA
	mov	r3, #1
	bl	build_device_pagetables
#endif
	bl	init_mmu

	/* Transition the PC from physical to virtual addressing. */
	ldr	pc, =1f
1:

	/* Setup stack, clear BSS */
	ldr	r1, =.Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	add	sp, sp, #INIT_ARM_STACK_SIZE
	sub	r2, r2, r1		/* get zero init data */
	mov	r3, #0
2:
	str	r3, [r1], #0x0004	/* get zero init data */
	subs	r2, r2, #4
	bgt	2b

	/*
	 * Build the struct arm_boot_params on the stack and call initarm()
	 * with a pointer to it as the first argument.
	 */
	mov	r1, #28			/* loader info size is 28 bytes also second arg */
	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
	mov	r0, sp			/* loader info pointer is first arg */
	bic	sp, sp, #7		/* align stack to 8 bytes */
	str	r1, [r0]		/* Store length of loader info */
	str	r8, [r0, #4]		/* Store r0 from boot loader */
	str	r9, [r0, #8]		/* Store r1 from boot loader */
	str	r10, [r0, #12]		/* store r2 from boot loader */
	str	r11, [r0, #16]		/* store r3 from boot loader */
	str	r5, [r0, #20]		/* store the physical address */
	adr	r4, Lpagetable		/* load the pagetable address */
	ldr	r5, [r4, #4]
	str	r5, [r0, #24]		/* store the pagetable address */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */

	/* init arm will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)	/* call mi_startup()! */

	ldr	r0, =.Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(_start)

/*
 * Emit a two-word "magic va-to-pa pointer": the first word is the
 * link-time address of the pointer itself (via ".word ."), the second
 * is the link-time address of the target table.  translate_va_to_pa
 * uses the difference between the first word and the pointer's runtime
 * (physical) address to convert the second word to a physical address.
 */
#define VA_TO_PA_POINTER(name, table)	\
name:					;\
	.word	.			;\
	.word	table

/*
 * Returns the physical address of a magic va to pa pointer.
 * r0     - The pagetable data pointer. This must be built using the
 *          VA_TO_PA_POINTER macro.
 *          e.g.
 *            VA_TO_PA_POINTER(Lpagetable, pagetable)
 *            ...
 *            adr r0, Lpagetable
 *            bl translate_va_to_pa
 *            r0 will now contain the physical address of pagetable
 * r1, r2 - Trashed
 */
translate_va_to_pa:
	ldr	r1, [r0]
	sub	r2, r1, r0
	/* At this point: r2 = VA - PA */

	/*
	 * Find the physical address of the table. After these two
	 * instructions:
	 * r1 = va(pagetable)
	 *
	 * r0 = va(pagetable) - (VA - PA)
	 *    = va(pagetable) - VA + PA
	 *    = pa(pagetable)
	 */
	ldr	r1, [r0, #4]
	sub	r0, r1, r2
	mov	pc, lr

/*
 * Init MMU
 * r0 - the table base address
 */

ASENTRY_NP(init_mmu)

	/* Setup TLB and MMU registers */
	mcr	CP15_TTBR0(r0)		/* Set TTB */
	mov	r0, #0
	mcr	CP15_CONTEXTIDR(r0)	/* Set ASID to 0 */

	/* Set the Domain Access register */
	mov	r0, #DOMAIN_CLIENT	/* Only domain #0 is used */
	mcr	CP15_DACR(r0)

	/*
	 * Ensure that LPAE is disabled and that TTBR0 is used for
	 * translation; use a 16KB translation table.
	 */
	mov	r0, #0
	mcr	CP15_TTBCR(r0)

	/*
	 * Set TEX remap registers
	 *  - All is set to uncacheable memory
	 */
	ldr	r0, =0xAAAAA
	mcr	CP15_PRRR(r0)
	mov	r0, #0
	mcr	CP15_NMRR(r0)
	mcr	CP15_TLBIALL		/* Flush TLB */
	DSB
	ISB

	/* Enable MMU */
	mrc	CP15_SCTLR(r0)
	orr	r0, r0, #CPU_CONTROL_MMU_ENABLE
	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
	orr	r0, r0, #CPU_CONTROL_TR_ENABLE
	orr	r0, r0, #CPU_CONTROL_AF_ENABLE
	mcr	CP15_SCTLR(r0)
	DSB
	ISB
	mcr	CP15_TLBIALL		/* Flush TLB */
	mcr	CP15_BPIALL		/* Flush Branch predictor */
	DSB
	ISB

	mov	pc, lr
END(init_mmu)


/*
 * Init SMP coherent mode, enable caching and switch to final MMU table.
 * Called with disabled caches
 * r0 - The table base address
 * r1 - clear bits for aux register
 * r2 - set bits for aux register
 */
ASENTRY_NP(reinit_mmu)
	push	{r4-r11, lr}
	mov	r4, r0			/* save args across the cache calls */
	mov	r5, r1
	mov	r6, r2

	/* !! Be very paranoid here !! */
	/* !! We cannot write single bit here !! */

#if 0	/* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU
	DSB
	ISB

	/* Set auxiliary register */
	mrc	CP15_ACTLR(r7)
	bic	r8, r7, r5		/* Mask bits */
	eor	r8, r8, r6		/* Set bits */
	teq	r7, r8
	mcrne	CP15_ACTLR(r8)		/* only write ACTLR if it changes */
	DSB
	ISB

	/* Enable caches. */
	mrc	CP15_SCTLR(r7)
	orr	r7, #CPU_CONTROL_DC_ENABLE
	orr	r7, #CPU_CONTROL_IC_ENABLE
	orr	r7, #CPU_CONTROL_BPRD_ENABLE
	mcr	CP15_SCTLR(r7)
	DSB

	mcr	CP15_TTBR0(r4)		/* Set new TTB */
	DSB
	ISB

	mcr	CP15_TLBIALL		/* Flush TLB */
	mcr	CP15_BPIALL		/* Flush Branch predictor */
	DSB
	ISB

#if 0	/* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU
	DSB
	ISB

	pop	{r4-r11, pc}
END(reinit_mmu)


/*
 * Builds the page table
 * r0 - The table base address
 * r1 - The physical address (trashed)
 * r2 - The virtual address (trashed)
 * r3 - The number of 1MiB sections
 * r4 - Trashed
 *
 * Addresses must be 1MiB aligned
 */
build_device_pagetables:
	/* NOTE(review): device mappings currently use the same attributes
	 * as normal mappings (TEX remap maps everything uncacheable here). */
	ldr	r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
	b	1f
build_pagetables:
	/* Set the required page attributes */
	ldr	r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
1:
	orr	r1, r4

	/* Move the virtual address to the correct bit location */
	lsr	r2, #(PTE1_SHIFT - 2)

	mov	r4, r3
2:
	str	r1, [r0, r2]		/* write one 1MiB section entry */
	add	r2, r2, #4
	add	r1, r1, #(PTE1_SIZE)
	adds	r4, r4, #-1
	bhi	2b

	mov	pc, lr

VA_TO_PA_POINTER(Lpagetable, boot_pt1)

	/* 0 if the CPU entered in HYP mode, -1 otherwise (set by HANDLE_HYP). */
	.global	_C_LABEL(hypmode_enabled)
_C_LABEL(hypmode_enabled):
	.word	0

.Lstart:
	.word	_edata			/* Note that these three items are */
	.word	_ebss			/* loaded with a single ldmia and */
	.word	svcstk			/* must remain in order together. */

.Lmainreturned:
	.asciz	"main() returned"
	.align	2

	.bss
svcstk:
	.space	INIT_ARM_STACK_SIZE * MAXCPU

/*
 * Memory for the initial pagetable. We are unable to place this in
 * the bss as this will be cleared after the table is loaded.
 */
	.section ".init_pagetable", "aw", %nobits
	.align	14			/* 16KiB aligned */
	.globl	boot_pt1
boot_pt1:
	.space	L1_TABLE_SIZE

	.text
	.align	2

#if defined(SMP)

/*
 * Secondary-CPU (AP) entry point: disable caches/MMU, enable the MMU
 * with the boot page table, pick a per-CPU init stack by MPIDR, and
 * jump to init_secondary() with the cpu id in r0.
 */
ASENTRY_NP(mpentry)
	/* Make sure interrupts are disabled. */
	cpsid	ifa

	HANDLE_HYP

	/* Setup core, disable all caches. */
	mrc	CP15_SCTLR(r0)
	bic	r0, #CPU_CONTROL_MMU_ENABLE
	bic	r0, #CPU_CONTROL_AFLT_ENABLE
	bic	r0, #CPU_CONTROL_DC_ENABLE
	bic	r0, #CPU_CONTROL_IC_ENABLE
	bic	r0, #CPU_CONTROL_BPRD_ENABLE
	bic	r0, #CPU_CONTROL_SW_ENABLE
	orr	r0, #CPU_CONTROL_UNAL_ENABLE
	orr	r0, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r0)
	DSB
	ISB

	/* Invalidate L1 cache I+D cache */
	bl	dcache_inv_pou_all
	mcr	CP15_ICIALLU
	DSB
	ISB

	/* Find the delta between VA and PA */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	bl	init_mmu

	adr	r1, .Lstart+8		/* Get initstack pointer from */
	ldr	sp, [r1]		/* startup data. */
	mrc	CP15_MPIDR(r0)		/* Get processor id number. */
	and	r0, r0, #0x0f
	mov	r1, #INIT_ARM_STACK_SIZE
	mul	r2, r1, r0		/* Point sp to initstack */
	add	sp, sp, r2		/* area for this processor. */

	/* Switch to virtual addresses. */
	ldr	pc, =1f
1:
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(init_secondary)/* Off we go, cpu id in r0. */

	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(mpentry)

.Lmpreturned:
	.asciz	"init_secondary() returned"
	.align	2
#endif

ENTRY_NP(cpu_halt)

	/* XXX re-implement !!! */
	cpsid	ifa
	bl	dcache_wbinv_poc_all

	/* Jump to the platform reset hook if one was registered. */
	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]
	teq	r4, #0
	movne	pc, r4
1:
	WFI				/* otherwise idle forever */
	b	1b

	/*
	 * _cpu_reset_address contains the address to branch to, to complete
	 * the cpu reset after turning the MMU off
	 * This variable is provided by the hardware specific code
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)
END(cpu_halt)


/*
 * setjmp + longjmp
 */
ENTRY(setjmp)
	stmia	r0, {r4-r14}		/* save callee-saved regs, sp, lr */
	mov	r0, #0x00000000
	RET
END(setjmp)

ENTRY(longjmp)
	ldmia	r0, {r4-r14}		/* restore regs; returns via saved lr */
	mov	r0, #0x00000001
	RET
END(longjmp)

	.data
	.global	_C_LABEL(esym)
_C_LABEL(esym):	.word	_C_LABEL(end)

ENTRY_NP(abort)
	b	_C_LABEL(abort)
END(abort)

ENTRY_NP(sigcode)
	mov	r0, sp
	add	r0, r0, #SIGF_UC

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */

	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well if that failed we better exit quick ! */

	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn */
	b	. - 16
END(sigcode)
	/* Syscall numbers read via the pc-relative loads above. */
	.word	SYS_sigreturn
	.word	SYS_exit

	.align	2
	.global	_C_LABEL(esigcode)
	_C_LABEL(esigcode):

	.data
	.global	szsigcode
szsigcode:
	.long	esigcode-sigcode

/* End of locore.S */