/*-
 * Copyright (c) 2012-2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "assym.inc"
#include "opt_kstack_pages.h"
#include <sys/elf_common.h>
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/hypervisor.h>
#include <machine/param.h>
#include <machine/pte.h>
#include <machine/vm.h>
#include <machine/vmparam.h>

#define	VIRT_BITS	48

#if PAGE_SIZE == PAGE_SIZE_16K
/*
 * The number of level 3 tables to create. 32 will allow for 1G of address
 * space, the same as a single level 2 page with 4k pages.
 */
#define	L3_PAGE_COUNT	32
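/*
 * (For reference: a 16KiB L3 table holds 2048 8-byte entries, each mapping
 * a 16KiB page, i.e. 32MiB per table, so 32 tables cover 1GiB.)
 */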
#endif

/*
 * The size of our bootstrap stack.
 */
#define	BOOT_STACK_SIZE	(KSTACK_PAGES * PAGE_SIZE)

	.globl	kernbase
	.set	kernbase, KERNBASE

/*
 * We assume:
 *  MMU      on with an identity map, or off
 *  D-Cache: off
 *  I-Cache: on or off
 *  We are loaded at a 2MiB aligned address
 */

ENTRY(_start)
	/* Enter the kernel exception level */
	bl	enter_kernel_el

	/* Set the context id */
	msr	contextidr_el1, xzr

	/* Get the virt -> phys offset */
	bl	get_load_phys_addr

	/*
	 * At this point:
	 * x28 = Our physical load address
	 */

	/* Create the page tables */
	bl	create_pagetables

	/*
	 * At this point:
	 * x27 = TTBR0 table
	 * x26 = Kernel L1 table
	 * x24 = TTBR1 table
	 */

	/* Enable the mmu */
	bl	start_mmu

	/* Load the new ttbr0 pagetable */
	adrp	x27, pagetable_l0_ttbr0
	add	x27, x27, :lo12:pagetable_l0_ttbr0

	/* Jump to the virtual address space */
	ldr	x15, .Lvirtdone
	br	x15

virtdone:
	BTI_J

	/* Set up the stack */
	adrp	x25, initstack_end
	add	x25, x25, :lo12:initstack_end
	sub	sp, x25, #PCB_SIZE

	/* Zero the BSS */
	ldr	x15, .Lbss
	ldr	x14, .Lend
1:
	stp	xzr, xzr, [x15], #16
	cmp	x15, x14
	b.lo	1b

#if defined(PERTHREAD_SSP)
	/* Set sp_el0 to the boot canary for early per-thread SSP to work */
	adrp	x15, boot_canary
	add	x15, x15, :lo12:boot_canary
	msr	sp_el0, x15
#endif

	/* Backup the module pointer */
	mov	x1, x0

	sub	sp, sp, #BOOTPARAMS_SIZE
	mov	x0, sp

	str	x1, [x0, #BP_MODULEP]
	adrp	x25, initstack
	add	x25, x25, :lo12:initstack
	str	x25, [x0, #BP_KERN_STACK]
	str	x27, [x0, #BP_KERN_TTBR0]
	str	x23, [x0, #BP_BOOT_EL]

#ifdef KASAN
	/* Save bootparams */
	mov	x19, x0

	/* Bootstrap an early shadow map for the boot stack. */
	ldr	x0, [x0, #BP_KERN_STACK]
	ldr	x1, =BOOT_STACK_SIZE
	bl	kasan_init_early

	/* Restore bootparams */
	mov	x0, x19
#endif

	/* trace back starts here */
	mov	fp, #0
	/* Branch to C code */
	bl	initarm
	/* We are done with the boot params */
	add	sp, sp, #BOOTPARAMS_SIZE

	/*
	 * Enable pointer authentication in the kernel. We set the keys for
	 * thread0 in initarm so we have to wait until it returns to enable
	 * it. If we were to enable it in initarm then any authentication
	 * when returning would fail as it was called with pointer
	 * authentication disabled.
	 */
	bl	ptrauth_start

	bl	mi_startup

	/* We should not get here */
	brk	0

	.align 3
.Lvirtdone:
	.quad	virtdone
.Lbss:
	.quad	__bss_start
.Lend:
	.quad	__bss_end
END(_start)

#ifdef SMP
/*
 * void
 * mpentry_psci(unsigned long)
 *
 * Called by a core when it is being brought online with psci.
 * The data in x0 is passed straight to init_secondary.
 */
ENTRY(mpentry_psci)
	mov	x26, xzr
	b	mpentry_common
END(mpentry_psci)

/*
 * void
 * mpentry_spintable(void)
 *
 * Called by a core when it is being brought online with a spin-table.
 * Reads the new CPU ID and passes this to init_secondary.
 */
ENTRY(mpentry_spintable)
	ldr	x26, =spintable_wait
	b	mpentry_common
END(mpentry_spintable)
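
/*
 * Both entry points converge on mpentry_common: x26 is either zero (PSCI,
 * where the argument for init_secondary is already in x0) or the address of
 * spintable_wait, which mpentry_common calls once the MMU is enabled.
 */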

/* Wait for the current CPU to be released */
LENTRY(spintable_wait)
	/* Read the affinity bits from mpidr_el1 */
	mrs	x1, mpidr_el1
	ldr	x2, =CPU_AFF_MASK
	and	x1, x1, x2

	adrp	x2, ap_cpuid
1:
	ldr	x0, [x2, :lo12:ap_cpuid]
	cmp	x0, x1
	b.ne	1b

	str	xzr, [x2, :lo12:ap_cpuid]
	dsb	sy
	sev

	ret
LEND(spintable_wait)

LENTRY(mpentry_common)
	/* Disable interrupts */
	msr	daifset, #DAIF_INTR

	/* Enter the kernel exception level */
	bl	enter_kernel_el

	/* Set the context id */
	msr	contextidr_el1, xzr

	/* Load the kernel page table */
	adrp	x24, pagetable_l0_ttbr1
	add	x24, x24, :lo12:pagetable_l0_ttbr1
	/* Load the identity page table */
	adrp	x27, pagetable_l0_ttbr0_bootstrap
	add	x27, x27, :lo12:pagetable_l0_ttbr0_bootstrap

	/* Enable the mmu */
	bl	start_mmu

	/* Load the new ttbr0 pagetable */
	adrp	x27, pagetable_l0_ttbr0
	add	x27, x27, :lo12:pagetable_l0_ttbr0

	/* Jump to the virtual address space */
	ldr	x15, =mp_virtdone
	br	x15

mp_virtdone:
	BTI_J

	/*
	 * Allow this CPU to wait until the kernel is ready for it, e.g. with
	 * a spin-table where each CPU uses the same release address.
	 */
	cbz	x26, 1f
	blr	x26
1:

	/* Start using the AP boot stack */
	adrp	x4, bootstack
	ldr	x4, [x4, :lo12:bootstack]
	mov	sp, x4

#if defined(PERTHREAD_SSP)
	/* Set sp_el0 to the boot canary for early per-thread SSP to work */
	adrp	x15, boot_canary
	add	x15, x15, :lo12:boot_canary
	msr	sp_el0, x15
#endif

	/* Load the kernel ttbr0 pagetable */
	msr	ttbr0_el1, x27
	isb

	/* Invalidate the TLB */
	tlbi	vmalle1
	dsb	sy
	isb

	/*
	 * Initialize the per-CPU pointer before calling into C code, for the
	 * benefit of kernel sanitizers.
	 */
	adrp	x18, bootpcpu
	ldr	x18, [x18, :lo12:bootpcpu]
	msr	tpidr_el1, x18

	b	init_secondary
LEND(mpentry_common)
#endif

/*
 * Enter the exception level the kernel will use:
 *
 *  - If in EL1 continue in EL1
 *  - If the CPU supports FEAT_VHE then set HCR_E2H and HCR_TGE and continue
 *    in EL2
 *  - Configure EL2 to support running the kernel at EL1 and exit to that
 */
LENTRY(enter_kernel_el)
#define	INIT_SCTLR_EL1	(SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_EIS | \
    SCTLR_TSCXT | SCTLR_EOS)
	mrs	x23, CurrentEL
	and	x23, x23, #(CURRENTEL_EL_MASK)
	cmp	x23, #(CURRENTEL_EL_EL2)
	b.eq	1f

	ldr	x2, =INIT_SCTLR_EL1
	msr	sctlr_el1, x2
	/*
	 * SCTLR_EOS is set, so eret is a context synchronizing event. We
	 * need an isb here to ensure it's observed by later instructions,
	 * but don't need one in the eret below.
	 */
	isb

	/*
	 * Ensure SPSR_EL1 and pstate are in sync. The only way to set the
	 * latter is to set the former and return from an exception with
	 * eret.
	 */
	mov	x2, #(PSR_DAIF | PSR_M_EL1h)
	msr	spsr_el1, x2
	msr	elr_el1, lr
	eret
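
	/*
	 * The CPU entered the kernel in EL2. Configure EL2 so the kernel can
	 * either run here (VHE) or drop to EL1 via the eret at the end of
	 * this routine.
	 */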
1:
	dsb	sy
	/*
	 * Set just the reserved bits in sctlr_el2. This disables the MMU,
	 * which could otherwise break the kernel when it is entered in EL2,
	 * e.g. when using VHE.
	 */
	ldr	x2, =(SCTLR_EL2_RES1 | SCTLR_EL2_EIS | SCTLR_EL2_EOS)
	msr	sctlr_el2, x2
	isb

	/* Configure the Hypervisor */
	ldr	x2, =(HCR_RW | HCR_APK | HCR_API | HCR_E2H)
	msr	hcr_el2, x2

	/* Stash value of HCR_EL2 for later */
	isb
	mrs	x4, hcr_el2

	/* Load the Virtualization Process ID Register */
	mrs	x2, midr_el1
	msr	vpidr_el2, x2

	/* Load the Virtualization Multiprocess ID Register */
	mrs	x2, mpidr_el1
	msr	vmpidr_el2, x2

	/* Set the initial sctlr_el1 */
	ldr	x2, =INIT_SCTLR_EL1
	msr	sctlr_el1, x2

	/* Check if the E2H flag is set */
	tst	x4, #HCR_E2H
	b.eq	.Lno_vhe

	/*
	 * The kernel will be running in EL2, route exceptions here rather
	 * than EL1.
	 */
	orr	x4, x4, #(HCR_TGE)
	msr	hcr_el2, x4
	isb

	msr	SCTLR_EL12_REG, x2
	ldr	x2, =(CPTR_FPEN)
	ldr	x3, =(CNTHCTL_E2H_EL1PCTEN | CNTHCTL_E2H_EL1PTEN)
	ldr	x5, =(PSR_DAIF | PSR_M_EL2h)
	b	.Ldone_vhe

.Lno_vhe:
	/* Hypervisor trap functions */
	adrp	x2, hyp_stub_vectors
	add	x2, x2, :lo12:hyp_stub_vectors
	msr	vbar_el2, x2

	ldr	x2, =(CPTR_RES1)
	ldr	x3, =(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
	ldr	x5, =(PSR_DAIF | PSR_M_EL1h)

.Ldone_vhe:

	msr	cptr_el2, x2
	/* Enable access to the physical timers at EL1 */
	msr	cnthctl_el2, x3
	/* Set the return PSTATE */
	msr	spsr_el2, x5

	/* Don't trap to EL2 for CP15 traps */
	msr	hstr_el2, xzr

	/* Set the counter offset to a known value */
	msr	cntvoff_el2, xzr

	/* Zero vttbr_el2 so a hypervisor can tell the host and guest apart */
	msr	vttbr_el2, xzr

	/* Configure GICv3 CPU interface */
	mrs	x2, id_aa64pfr0_el1
	/* Extract GIC bits from the register */
	ubfx	x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS
	/* GIC[3:0] != 0000 - GIC CPU interface via special regs. supported */
	cbz	x2, 2f

	mrs	x2, icc_sre_el2
	orr	x2, x2, #ICC_SRE_EL2_EN	/* Enable access from non-secure EL1 */
	orr	x2, x2, #ICC_SRE_EL2_SRE	/* Enable system registers */
	msr	icc_sre_el2, x2
2:

	/* Set the exception return address to our return address (lr) */
	msr	elr_el2, x30
	isb

	eret
#undef INIT_SCTLR_EL1
LEND(enter_kernel_el)
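
/*
 * Worked example, with a purely hypothetical load address: if the loader put
 * the kernel at physical address 0x40200000, the routine below subtracts its
 * own link-time offset from KERNBASE from its PC-relative (physical)
 * address, leaving 0x40200000, the physical address of KERNBASE, in x28.
 */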

/*
 * Get the physical address the kernel was loaded at.
 */
LENTRY(get_load_phys_addr)
	/* Load the offset of get_load_phys_addr from KERNBASE */
	ldr	x28, =(get_load_phys_addr - KERNBASE)
	/* Load the physical address of get_load_phys_addr */
	adr	x29, get_load_phys_addr
	/* Find the physical address of KERNBASE, i.e. our load address */
	sub	x28, x29, x28
	ret
LEND(get_load_phys_addr)

/*
 * This builds the page tables containing the identity map, and the kernel
 * virtual map.
 *
 * It relies on:
 *  We were loaded to an address that is on a 2MiB boundary
 *  None of this memory may cross a 1GiB boundary
 *  x28 contains the physical address we were loaded from
 *
 * There are 7 or 8 pages before that address for the page tables
 * The pages used are:
 *  - The Kernel L3 tables (only for 16k kernel)
 *  - The Kernel L2 table
 *  - The Kernel L1 table
 *  - The Kernel L0 table (TTBR1)
 *  - The identity (PA = VA) L2 table
 *  - The identity (PA = VA) L1 table
 *  - The identity (PA = VA) L0 table (Early TTBR0)
 *  - The Kernel empty L0 table (Late TTBR0)
 */
LENTRY(create_pagetables)
	/* Save the Link register */
	mov	x5, x30

	/* Clean the page table */
	adrp	x6, pagetable
	add	x6, x6, :lo12:pagetable
	mov	x26, x6
	adrp	x27, pagetable_end
	add	x27, x27, :lo12:pagetable_end
1:
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	cmp	x6, x27
	b.lo	1b

	/*
	 * Build the TTBR1 maps.
	 */

	/* Find the size of the kernel */
	mov	x6, #(KERNBASE)

#if defined(LINUX_BOOT_ABI)
	/* X19 is used as 'map FDT data' flag */
	mov	x19, xzr

	/* No modules or FDT pointer ? */
	cbz	x0, booti_no_fdt

	/*
	 * Test if x0 points to modules descriptor (virtual address) or
	 * to FDT (physical address)
	 */
	cmp	x0, x6		/* x6 is #(KERNBASE) */
	b.lo	booti_fdt
#endif

	/* Booted with modules pointer */
	/* Find modulep - begin */
	sub	x8, x0, x6
	/*
	 * Add space for the module data. When PAGE_SIZE is 4k this will
	 * add at least 2 level 2 blocks (2 * 2MiB). When PAGE_SIZE is
	 * larger the reservation is at least as large because smaller
	 * level 3 pages are used.
	 */
	ldr	x7, =((6 * 1024 * 1024) - 1)
	add	x8, x8, x7
	b	common

#if defined(LINUX_BOOT_ABI)
booti_fdt:
	/* Booted by U-Boot booti with FDT data */
	/* Set 'map FDT data' flag */
	mov	x19, #1

booti_no_fdt:
	/* Booted by U-Boot booti without FDT data */
	/* Find the end - begin */
	ldr	x7, .Lend
	sub	x8, x7, x6

	/*
	 * Add one 2MiB page for copy of FDT data (maximum FDT size),
	 * one for metadata and round up
	 */
	ldr	x7, =(3 * L2_SIZE - 1)
	add	x8, x8, x7
#endif

common:
#if PAGE_SIZE != PAGE_SIZE_4K
	/*
	 * Create L3 and L3C pages. The kernel will be loaded at a 2M aligned
	 * address, enabling the creation of L3C pages. However, when the page
	 * size is larger than 4k, L2 blocks are too large to map the kernel
	 * with 2M alignment.
	 */
#define	PTE_SHIFT	L3_SHIFT
#define	BUILD_PTE_FUNC	build_l3_page_pagetable
#else
#define	PTE_SHIFT	L2_SHIFT
#define	BUILD_PTE_FUNC	build_l2_block_pagetable
#endif
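
	/*
	 * With 4k pages PTE_SHIFT selects 2MiB L2 blocks; with larger pages
	 * the kernel is instead mapped with individual L3 pages, which
	 * build_l3_page_pagetable groups into contiguous (L3C) runs where
	 * alignment allows.
	 */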

	/* Get the number of blocks/pages to allocate, rounded down */
	lsr	x10, x8, #(PTE_SHIFT)

	/* Create the kernel space PTE table */
	mov	x6, x26
	mov	x7, #(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	mov	x8, #(KERNBASE)
	mov	x9, x28
	bl	BUILD_PTE_FUNC

#undef PTE_SHIFT
#undef BUILD_PTE_FUNC

#if PAGE_SIZE != PAGE_SIZE_4K
	/* Move to the l2 table */
	ldr	x9, =(PAGE_SIZE * L3_PAGE_COUNT)
	add	x26, x26, x9

	/* Link the l2 -> l3 table */
	mov	x9, x6
	mov	x6, x26
	bl	link_l2_pagetable
#endif

	/* Move to the l1 table */
	add	x26, x26, #PAGE_SIZE

	/* Link the l1 -> l2 table */
	mov	x9, x6
	mov	x6, x26
	bl	link_l1_pagetable

	/* Move to the l0 table */
	add	x24, x26, #PAGE_SIZE

	/* Link the l0 -> l1 table */
	mov	x9, x6
	mov	x6, x24
	mov	x10, #1
	bl	link_l0_pagetable

	/*
	 * Build the TTBR0 maps. As TTBR0 maps, they must specify ATTR_S1_nG.
	 * They are only needed early on, so the VA = PA map covers just the
	 * kernel image and a few boot-time mappings.
	 */
	add	x27, x24, #PAGE_SIZE

	mov	x6, x27		/* The initial page table */

	/* Create the VA = PA map */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	adrp	x16, _start
	and	x16, x16, #(~L2_OFFSET)
	mov	x9, x16		/* PA start */
	mov	x8, x16		/* VA start (== PA start) */
	mov	x10, #1
	bl	build_l2_block_pagetable

#if defined(SOCDEV_PA)
	/* Create a table for the UART */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_DEVICE))
	ldr	x9, =(L2_SIZE)
	add	x16, x16, x9	/* VA start */
	mov	x8, x16

	/* Store the socdev virtual address */
	add	x17, x8, #(SOCDEV_PA & L2_OFFSET)
	adrp	x9, socdev_va
	str	x17, [x9, :lo12:socdev_va]

	mov	x9, #(SOCDEV_PA & ~L2_OFFSET)	/* PA start */
	mov	x10, #1
	bl	build_l2_block_pagetable
#endif

#if defined(LINUX_BOOT_ABI)
	/* Map FDT data ? */
	cbz	x19, 1f

	/* Create the mapping for FDT data (2 MiB max) */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	ldr	x9, =(L2_SIZE)
	add	x16, x16, x9	/* VA start */
	mov	x8, x16
	mov	x9, x0		/* PA start */
	/* Update the module pointer to point at the allocated memory */
	and	x0, x0, #(L2_OFFSET)	/* Keep the lower bits */
	add	x0, x0, x8	/* Add the aligned virtual address */

	mov	x10, #1
	bl	build_l2_block_pagetable

1:
#endif

	/* Move to the l1 table */
	add	x27, x27, #PAGE_SIZE

	/* Link the l1 -> l2 table */
	mov	x9, x6
	mov	x6, x27
	bl	link_l1_pagetable

	/* Move to the l0 table */
	add	x27, x27, #PAGE_SIZE

	/* Link the l0 -> l1 table */
	mov	x9, x6
	mov	x6, x27
	mov	x10, #1
	bl	link_l0_pagetable

	/* Restore the Link register */
	mov	x30, x5
	ret
LEND(create_pagetables)
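
/*
 * The helpers below all follow the same pattern: a table descriptor holds
 * the physical address of the next-level table plus the TABLE type bits,
 * while a block/page descriptor holds the output physical address plus the
 * memory attributes. Descriptors are 8 bytes, hence the "lsl #3" when
 * indexing into a table.
 */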

/*
 * Builds an L0 -> L1 table descriptor
 *
 * x6  = L0 table
 * x8  = Virtual Address
 * x9  = L1 PA (trashed)
 * x10 = Entry count (trashed)
 * x11, x12 and x13 are trashed
 */
LENTRY(link_l0_pagetable)
	/*
	 * Link an L0 -> L1 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L0_SHIFT
	and	x11, x11, #L0_ADDR_MASK

	/* Build the L0 block entry */
	mov	x12, #L0_TABLE
	orr	x12, x12, #(TATTR_UXN_TABLE | TATTR_AP_TABLE_NO_EL0)

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
1:	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(link_l0_pagetable)

/*
 * Builds an L1 -> L2 table descriptor
 *
 * x6  = L1 table
 * x8  = Virtual Address
 * x9  = L2 PA (trashed)
 * x11, x12 and x13 are trashed
 */
LENTRY(link_l1_pagetable)
	/*
	 * Link an L1 -> L2 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L1_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L1 block entry */
	mov	x12, #L1_TABLE

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	ret
LEND(link_l1_pagetable)

/*
 * Builds count 2 MiB page table entries
 * x6  = L2 table
 * x7  = Block attributes
 * x8  = VA start
 * x9  = PA start (trashed)
 * x10 = Entry count (trashed)
 * x11, x12 and x13 are trashed
 */
LENTRY(build_l2_block_pagetable)
	/*
	 * Build the L2 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L2_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L2 block entry */
	orr	x12, x7, #L2_BLOCK
	orr	x12, x12, #(ATTR_DEFAULT)
	orr	x12, x12, #(ATTR_S1_UXN)
#ifdef __ARM_FEATURE_BTI_DEFAULT
	orr	x12, x12, #(ATTR_S1_GP)
#endif

	/* Only use the output address bits */
	lsr	x9, x9, #L2_SHIFT

	/* Set the physical address for this virtual address */
1:	orr	x13, x12, x9, lsl #L2_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(build_l2_block_pagetable)

#if PAGE_SIZE != PAGE_SIZE_4K
/*
 * Builds an L2 -> L3 table descriptor
 *
 * x6  = L2 table
 * x8  = Virtual Address
 * x9  = L3 PA (trashed)
 * x11, x12 and x13 are trashed
 */
LENTRY(link_l2_pagetable)
	/*
	 * Link an L2 -> L3 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L2_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L2 table entry */
	mov	x12, #L2_TABLE

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	ret
LEND(link_l2_pagetable)
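
/*
 * Note: a contiguous (L3C) run is L3C_ENTRIES adjacent L3 entries, covering
 * 2MiB with 16KiB pages. This is why the routine below requires VA and PA to
 * be congruent modulo L3C_SIZE and why the kernel's 2MiB load alignment is
 * enough to use it.
 */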

/*
 * Builds count level 3 page table entries. Uses ATTR_CONTIGUOUS to create
 * large page (L3C) mappings when the current VA and remaining count allow
 * it.
 * x6  = L3 table
 * x7  = Block attributes
 * x8  = VA start
 * x9  = PA start (trashed)
 * x10 = Entry count (trashed)
 * x11, x12 and x13 are trashed
 *
 * VA start (x8) modulo L3C_SIZE must equal PA start (x9) modulo L3C_SIZE.
 */
LENTRY(build_l3_page_pagetable)
	/*
	 * Build the L3 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L3_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L3 page entry */
	orr	x12, x7, #L3_PAGE
	orr	x12, x12, #(ATTR_DEFAULT)
	orr	x12, x12, #(ATTR_S1_UXN)
#ifdef __ARM_FEATURE_BTI_DEFAULT
	orr	x12, x12, #(ATTR_S1_GP)
#endif

	/* Only use the output address bits */
	lsr	x9, x9, #L3_SHIFT

	/* Check if an ATTR_CONTIGUOUS mapping is possible */
1:	tst	x11, #(L3C_ENTRIES - 1)
	b.ne	2f
	cmp	x10, #L3C_ENTRIES
	b.lo	3f
	orr	x12, x12, #(ATTR_CONTIGUOUS)
	b	2f
3:	and	x12, x12, #(~ATTR_CONTIGUOUS)

	/* Set the physical address for this virtual address */
2:	orr	x13, x12, x9, lsl #L3_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(build_l3_page_pagetable)
#endif

LENTRY(start_mmu)
	dsb	sy

	/* Load the exception vectors */
	ldr	x2, =exception_vectors
	msr	vbar_el1, x2

	/* Load ttbr0 and ttbr1 */
	msr	ttbr0_el1, x27
	msr	ttbr1_el1, x24
	isb

	/* Clear the Monitor Debug System control register */
	msr	mdscr_el1, xzr

	/* Invalidate the TLB */
	tlbi	vmalle1is
	dsb	ish
	isb

	ldr	x2, mair
	msr	mair_el1, x2

	/*
	 * Setup TCR according to the PARange and ASIDBits fields
	 * from ID_AA64MMFR0_EL1 and the HAFDBS field from
	 * ID_AA64MMFR1_EL1. More precisely, set TCR_EL1.AS
	 * to 1 only if the ASIDBits field equals 0b0010.
	 */
	ldr	x2, tcr
	mrs	x3, id_aa64mmfr0_el1

	/* Copy the bottom 3 bits from id_aa64mmfr0_el1 into TCR.IPS */
	bfi	x2, x3, #(TCR_IPS_SHIFT), #(TCR_IPS_WIDTH)
	and	x3, x3, #(ID_AA64MMFR0_ASIDBits_MASK)

	/* Check if the HW supports 16 bit ASIDS */
	cmp	x3, #(ID_AA64MMFR0_ASIDBits_16)
	/* If so x3 == 1, else x3 == 0 */
	cset	x3, eq
	/* Set TCR.AS with x3 */
	bfi	x2, x3, #(TCR_ASID_SHIFT), #(TCR_ASID_WIDTH)
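
	/*
	 * ID_AA64MMFR1_EL1.HAFDBS encoding: 0 = no hardware updates,
	 * 1 = hardware Access flag updates, 2 = hardware Access flag and
	 * dirty state updates.
	 */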
	/*
	 * Check if the HW supports access flag and dirty state updates,
	 * and set TCR_EL1.HA and TCR_EL1.HD accordingly.
	 */
	mrs	x3, id_aa64mmfr1_el1
	and	x3, x3, #(ID_AA64MMFR1_HAFDBS_MASK)
	cmp	x3, #1
	b.ne	1f
	orr	x2, x2, #(TCR_HA)
	b	2f
1:
	cmp	x3, #2
	b.ne	2f
	orr	x2, x2, #(TCR_HA | TCR_HD)
2:
	msr	tcr_el1, x2

	/*
	 * Setup SCTLR.
	 */
	ldr	x2, sctlr_set
	ldr	x3, sctlr_clear
	mrs	x1, sctlr_el1
	bic	x1, x1, x3	/* Clear the required bits */
	orr	x1, x1, x2	/* Set the required bits */
	msr	sctlr_el1, x1
	isb

	ret

	.align 3
mair:
	.quad	MAIR_ATTR(MAIR_DEVICE_nGnRnE, VM_MEMATTR_DEVICE_nGnRnE) | \
		MAIR_ATTR(MAIR_NORMAL_NC, VM_MEMATTR_UNCACHEABLE)   |	\
		MAIR_ATTR(MAIR_NORMAL_WB, VM_MEMATTR_WRITE_BACK)    |	\
		MAIR_ATTR(MAIR_NORMAL_WT, VM_MEMATTR_WRITE_THROUGH) |	\
		MAIR_ATTR(MAIR_DEVICE_nGnRE, VM_MEMATTR_DEVICE_nGnRE)
tcr:
#if PAGE_SIZE == PAGE_SIZE_4K
#define	TCR_TG	(TCR_TG1_4K | TCR_TG0_4K)
#elif PAGE_SIZE == PAGE_SIZE_16K
#define	TCR_TG	(TCR_TG1_16K | TCR_TG0_16K)
#else
#error Unsupported page size
#endif

	.quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG | \
	    TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
sctlr_set:
	/* Bits to set */
	.quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN | \
	    SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \
	    SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C | \
	    SCTLR_M | SCTLR_CP15BEN | SCTLR_BT1 | SCTLR_BT0)
sctlr_clear:
	/* Bits to clear */
	.quad (SCTLR_EE | SCTLR_E0E | SCTLR_IESB | SCTLR_WXN | SCTLR_UMA | \
	    SCTLR_ITD | SCTLR_A)
LEND(start_mmu)

ENTRY(abort)
	b abort
END(abort)

.bss
	.align	PAGE_SHIFT
initstack:
	.space	BOOT_STACK_SIZE
initstack_end:

	.section .init_pagetable, "aw", %nobits
	.align PAGE_SHIFT
	/*
	 * 7 initial tables (in the following order):
	 *  L2 for kernel (High addresses)
	 *  L1 for kernel
	 *  L0 for kernel
	 *  L2 bootstrap for user (Low addresses)
	 *  L1 bootstrap for user
	 *  L0 bootstrap for user
	 *  L0 for user
	 */
	.globl pagetable_l0_ttbr1
pagetable:
#if PAGE_SIZE != PAGE_SIZE_4K
	.space	(PAGE_SIZE * L3_PAGE_COUNT)
pagetable_l2_ttbr1:
#endif
	.space	PAGE_SIZE
pagetable_l1_ttbr1:
	.space	PAGE_SIZE
pagetable_l0_ttbr1:
	.space	PAGE_SIZE
pagetable_l2_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l1_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l0_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l0_ttbr0:
	.space	PAGE_SIZE
pagetable_end:

el2_pagetable:
	.space	PAGE_SIZE

	.section .rodata, "a", %progbits
	.globl	aarch32_sigcode
	.align 2
aarch32_sigcode:
	.word 0xe1a0000d	// mov r0, sp
	.word 0xe2800040	// add r0, r0, #SIGF_UC
	.word 0xe59f700c	// ldr r7, [pc, #12]
	.word 0xef000000	// swi #0
	.word 0xe59f7008	// ldr r7, [pc, #8]
	.word 0xef000000	// swi #0
	.word 0xeafffffa	// b . - 16
	.word SYS_sigreturn
	.word SYS_exit
	.align	3
	.size aarch32_sigcode, . - aarch32_sigcode
aarch32_esigcode:
	.data
	.global sz_aarch32_sigcode
sz_aarch32_sigcode:
	.quad aarch32_esigcode - aarch32_sigcode

GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)