1/*- 2 * Copyright (c) 2012-2014 Andrew Turner 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27#include "assym.inc" 28#include "opt_kstack_pages.h" 29#include <sys/elf_common.h> 30#include <sys/syscall.h> 31#include <machine/asm.h> 32#include <machine/armreg.h> 33#include <machine/cpu.h> 34#include <machine/hypervisor.h> 35#include <machine/param.h> 36#include <machine/pte.h> 37#include <machine/vm.h> 38#include <machine/vmparam.h> 39 40#define VIRT_BITS 48 41 42#if PAGE_SIZE == PAGE_SIZE_16K 43/* 44 * The number of level 3 tables to create. 32 will allow for 1G of address 45 * space, the same as a single level 2 page with 4k pages. 46 */ 47#define L3_PAGE_COUNT 32 48#endif 49 50/* 51 * The size of our bootstrap stack. 52 */ 53#define BOOT_STACK_SIZE (KSTACK_PAGES * PAGE_SIZE) 54 55 .globl kernbase 56 .set kernbase, KERNBASE 57 58/* 59 * We assume: 60 * MMU on with an identity map, or off 61 * D-Cache: off 62 * I-Cache: on or off 63 * We are loaded at a 2MiB aligned address 64 */ 65 66ENTRY(_start) 67 /* Enter the kernel exception level */ 68 bl enter_kernel_el 69 70 /* 71 * Disable the MMU. We may have entered the kernel with it on and 72 * will need to update the tables later. If this has been set up 73 * with anything other than a VA == PA map then this will fail, 74 * but in this case the code to find where we are running from 75 * would have also failed. 76 */ 77 dsb sy 78 mrs x2, sctlr_el1 79 bic x2, x2, SCTLR_M 80 msr sctlr_el1, x2 81 isb 82 83 /* Set the context id */ 84 msr contextidr_el1, xzr 85 86 /* Get the virt -> phys offset */ 87 bl get_load_phys_addr 88 89 /* 90 * At this point: 91 * x28 = Our physical load address 92 */ 93 94 /* Create the page tables */ 95 bl create_pagetables 96 97 /* 98 * At this point: 99 * x27 = TTBR0 table 100 * x26 = Kernel L1 table 101 * x24 = TTBR1 table 102 */ 103 104 /* Enable the mmu */ 105 bl start_mmu 106 107 /* Load the new ttbr0 pagetable */ 108 adrp x27, pagetable_l0_ttbr0 109 add x27, x27, :lo12:pagetable_l0_ttbr0 110 111 /* Jump to the virtual address space */ 112 ldr x15, .Lvirtdone 113 br x15 114 115virtdone: 116 BTI_J 117 118 /* Set up the stack */ 119 adrp x25, initstack_end 120 add x25, x25, :lo12:initstack_end 121 sub sp, x25, #PCB_SIZE 122 123 /* Zero the BSS */ 124 ldr x15, .Lbss 125 ldr x14, .Lend 1261: 127 str xzr, [x15], #8 128 cmp x15, x14 129 b.lo 1b 130 131#if defined(PERTHREAD_SSP) 132 /* Set sp_el0 to the boot canary for early per-thread SSP to work */ 133 adrp x15, boot_canary 134 add x15, x15, :lo12:boot_canary 135 msr sp_el0, x15 136#endif 137 138 /* Backup the module pointer */ 139 mov x1, x0 140 141 sub sp, sp, #BOOTPARAMS_SIZE 142 mov x0, sp 143 144 str x1, [x0, #BP_MODULEP] 145 adrp x25, initstack 146 add x25, x25, :lo12:initstack 147 str x25, [x0, #BP_KERN_STACK] 148 str x27, [x0, #BP_KERN_TTBR0] 149 str x23, [x0, #BP_BOOT_EL] 150 str x4, [x0, #BP_HCR_EL2] 151 152#ifdef KASAN 153 /* Save bootparams */ 154 mov x19, x0 155 156 /* Bootstrap an early shadow map for the boot stack. */ 157 ldr x0, [x0, #BP_KERN_STACK] 158 ldr x1, =BOOT_STACK_SIZE 159 bl kasan_init_early 160 161 /* Restore bootparams */ 162 mov x0, x19 163#endif 164 165 /* trace back starts here */ 166 mov fp, #0 167 /* Branch to C code */ 168 bl initarm 169 /* We are done with the boot params */ 170 add sp, sp, #BOOTPARAMS_SIZE 171 172 /* 173 * Enable pointer authentication in the kernel. We set the keys for 174 * thread0 in initarm so have to wait until it returns to enable it. 175 * If we were to enable it in initarm then any authentication when 176 * returning would fail as it was called with pointer authentication 177 * disabled. 178 */ 179 bl ptrauth_start 180 181 bl mi_startup 182 183 /* We should not get here */ 184 brk 0 185 186 .align 3 187.Lvirtdone: 188 .quad virtdone 189.Lbss: 190 .quad __bss_start 191.Lend: 192 .quad __bss_end 193END(_start) 194 195#ifdef SMP 196/* 197 * void 198 * mpentry_psci(unsigned long) 199 * 200 * Called by a core when it is being brought online with psci. 201 * The data in x0 is passed straight to init_secondary. 202 */ 203ENTRY(mpentry_psci) 204 mov x26, xzr 205 b mpentry_common 206END(mpentry_psci) 207 208/* 209 * void 210 * mpentry_spintable(void) 211 * 212 * Called by a core when it is being brought online with a spin-table. 213 * Reads the new CPU ID and passes this to init_secondary. 214 */ 215ENTRY(mpentry_spintable) 216 ldr x26, =spintable_wait 217 b mpentry_common 218END(mpentry_spintable) 219 220/* Wait for the current CPU to be released */ 221LENTRY(spintable_wait) 222 /* Read the affinity bits from mpidr_el1 */ 223 mrs x1, mpidr_el1 224 ldr x2, =CPU_AFF_MASK 225 and x1, x1, x2 226 227 adrp x2, ap_cpuid 2281: 229 ldr x0, [x2, :lo12:ap_cpuid] 230 cmp x0, x1 231 b.ne 1b 232 233 str xzr, [x2, :lo12:ap_cpuid] 234 dsb sy 235 sev 236 237 ret 238LEND(mpentry_spintable) 239 240LENTRY(mpentry_common) 241 /* Disable interrupts */ 242 msr daifset, #DAIF_INTR 243 244 /* Enter the kernel exception level */ 245 bl enter_kernel_el 246 247 /* Set the context id */ 248 msr contextidr_el1, xzr 249 250 /* Load the kernel page table */ 251 adrp x24, pagetable_l0_ttbr1 252 add x24, x24, :lo12:pagetable_l0_ttbr1 253 /* Load the identity page table */ 254 adrp x27, pagetable_l0_ttbr0_bootstrap 255 add x27, x27, :lo12:pagetable_l0_ttbr0_bootstrap 256 257 /* Enable the mmu */ 258 bl start_mmu 259 260 /* Load the new ttbr0 pagetable */ 261 adrp x27, pagetable_l0_ttbr0 262 add x27, x27, :lo12:pagetable_l0_ttbr0 263 264 /* Jump to the virtual address space */ 265 ldr x15, =mp_virtdone 266 br x15 267 268mp_virtdone: 269 BTI_J 270 271 /* 272 * Allow this CPU to wait until the kernel is ready for it, 273 * e.g. with spin-table but each CPU uses the same release address 274 */ 275 cbz x26, 1f 276 blr x26 2771: 278 279 /* Start using the AP boot stack */ 280 adrp x4, bootstack 281 ldr x4, [x4, :lo12:bootstack] 282 mov sp, x4 283 284#if defined(PERTHREAD_SSP) 285 /* Set sp_el0 to the boot canary for early per-thread SSP to work */ 286 adrp x15, boot_canary 287 add x15, x15, :lo12:boot_canary 288 msr sp_el0, x15 289#endif 290 291 /* Load the kernel ttbr0 pagetable */ 292 msr ttbr0_el1, x27 293 isb 294 295 /* Invalidate the TLB */ 296 tlbi vmalle1 297 dsb sy 298 isb 299 300 /* 301 * Initialize the per-CPU pointer before calling into C code, for the 302 * benefit of kernel sanitizers. 303 */ 304 adrp x18, bootpcpu 305 ldr x18, [x18, :lo12:bootpcpu] 306 msr tpidr_el1, x18 307 308 b init_secondary 309LEND(mpentry_common) 310#endif 311 312/* 313 * If we are started in EL2, configure the required hypervisor 314 * registers and drop to EL1. 315 */ 316LENTRY(enter_kernel_el) 317 mrs x23, CurrentEL 318 and x23, x23, #(CURRENTEL_EL_MASK) 319 cmp x23, #(CURRENTEL_EL_EL2) 320 b.eq 1f 321 ret 3221: 323 /* 324 * Disable the MMU. If the HCR_EL2.E2H field is set we will clear it 325 * which may break address translation. 326 */ 327 dsb sy 328 mrs x2, sctlr_el2 329 bic x2, x2, SCTLR_M 330 msr sctlr_el2, x2 331 isb 332 333 /* Configure the Hypervisor */ 334 ldr x2, =(HCR_RW | HCR_APK | HCR_API) 335 msr hcr_el2, x2 336 337 /* Stash value of HCR_EL2 for later */ 338 isb 339 mrs x4, hcr_el2 340 341 /* Load the Virtualization Process ID Register */ 342 mrs x2, midr_el1 343 msr vpidr_el2, x2 344 345 /* Load the Virtualization Multiprocess ID Register */ 346 mrs x2, mpidr_el1 347 msr vmpidr_el2, x2 348 349 /* Set the bits that need to be 1 in sctlr_el1 */ 350 ldr x2, .Lsctlr_res1 351 msr sctlr_el1, x2 352 353 /* 354 * On some hardware, e.g., Apple M1, we can't clear E2H, so make sure we 355 * don't trap to EL2 for SIMD register usage to have at least a 356 * minimally usable system. 357 */ 358 tst x4, #HCR_E2H 359 mov x3, #CPTR_RES1 /* HCR_E2H == 0 */ 360 mov x5, #CPTR_FPEN /* HCR_E2H == 1 */ 361 csel x2, x3, x5, eq 362 msr cptr_el2, x2 363 364 /* Don't trap to EL2 for CP15 traps */ 365 msr hstr_el2, xzr 366 367 /* Enable access to the physical timers at EL1 */ 368 mrs x2, cnthctl_el2 369 orr x2, x2, #(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN) 370 msr cnthctl_el2, x2 371 372 /* Set the counter offset to a known value */ 373 msr cntvoff_el2, xzr 374 375 /* Hypervisor trap functions */ 376 adrp x2, hyp_stub_vectors 377 add x2, x2, :lo12:hyp_stub_vectors 378 msr vbar_el2, x2 379 380 /* Zero vttbr_el2 so a hypervisor can tell the host and guest apart */ 381 msr vttbr_el2, xzr 382 383 mov x2, #(PSR_DAIF | PSR_M_EL1h) 384 msr spsr_el2, x2 385 386 /* Configure GICv3 CPU interface */ 387 mrs x2, id_aa64pfr0_el1 388 /* Extract GIC bits from the register */ 389 ubfx x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS 390 /* GIC[3:0] == 0001 - GIC CPU interface via special regs. supported */ 391 cmp x2, #(ID_AA64PFR0_GIC_CPUIF_EN >> ID_AA64PFR0_GIC_SHIFT) 392 b.ne 2f 393 394 mrs x2, icc_sre_el2 395 orr x2, x2, #ICC_SRE_EL2_EN /* Enable access from insecure EL1 */ 396 orr x2, x2, #ICC_SRE_EL2_SRE /* Enable system registers */ 397 msr icc_sre_el2, x2 3982: 399 400 /* Set the address to return to our return address */ 401 msr elr_el2, x30 402 isb 403 404 eret 405 406 .align 3 407.Lsctlr_res1: 408 .quad SCTLR_RES1 409LEND(enter_kernel_el) 410 411/* 412 * Get the physical address the kernel was loaded at. 413 */ 414LENTRY(get_load_phys_addr) 415 /* Load the offset of get_load_phys_addr from KERNBASE */ 416 ldr x28, =(get_load_phys_addr - KERNBASE) 417 /* Load the physical address of get_load_phys_addr */ 418 adr x29, get_load_phys_addr 419 /* Find the physical address of KERNBASE, i.e. our load address */ 420 sub x28, x29, x28 421 ret 422LEND(get_load_phys_addr) 423 424/* 425 * This builds the page tables containing the identity map, and the kernel 426 * virtual map. 427 * 428 * It relys on: 429 * We were loaded to an address that is on a 2MiB boundary 430 * All the memory must not cross a 1GiB boundaty 431 * x28 contains the physical address we were loaded from 432 * 433 * There are 7 or 8 pages before that address for the page tables 434 * The pages used are: 435 * - The Kernel L3 tables (only for 16k kernel) 436 * - The Kernel L2 table 437 * - The Kernel L1 table 438 * - The Kernel L0 table (TTBR1) 439 * - The identity (PA = VA) L2 table 440 * - The identity (PA = VA) L1 table 441 * - The identity (PA = VA) L0 table (Early TTBR0) 442 * - The Kernel empty L0 table (Late TTBR0) 443 */ 444LENTRY(create_pagetables) 445 /* Save the Link register */ 446 mov x5, x30 447 448 /* Clean the page table */ 449 adrp x6, pagetable 450 add x6, x6, :lo12:pagetable 451 mov x26, x6 452 adrp x27, pagetable_end 453 add x27, x27, :lo12:pagetable_end 4541: 455 stp xzr, xzr, [x6], #16 456 stp xzr, xzr, [x6], #16 457 stp xzr, xzr, [x6], #16 458 stp xzr, xzr, [x6], #16 459 cmp x6, x27 460 b.lo 1b 461 462 /* 463 * Build the TTBR1 maps. 464 */ 465 466 /* Find the size of the kernel */ 467 mov x6, #(KERNBASE) 468 469#if defined(LINUX_BOOT_ABI) 470 /* X19 is used as 'map FDT data' flag */ 471 mov x19, xzr 472 473 /* No modules or FDT pointer ? */ 474 cbz x0, booti_no_fdt 475 476 /* 477 * Test if x0 points to modules descriptor(virtual address) or 478 * to FDT (physical address) 479 */ 480 cmp x0, x6 /* x6 is #(KERNBASE) */ 481 b.lo booti_fdt 482#endif 483 484 /* Booted with modules pointer */ 485 /* Find modulep - begin */ 486 sub x8, x0, x6 487 /* 488 * Add space for the module data. When PAGE_SIZE is 4k this will 489 * add at least 2 level 2 blocks (2 * 2MiB). When PAGE_SIZE is 490 * larger it will be at least as large as we use smaller level 3 491 * pages. 492 */ 493 ldr x7, =((6 * 1024 * 1024) - 1) 494 add x8, x8, x7 495 b common 496 497#if defined(LINUX_BOOT_ABI) 498booti_fdt: 499 /* Booted by U-Boot booti with FDT data */ 500 /* Set 'map FDT data' flag */ 501 mov x19, #1 502 503booti_no_fdt: 504 /* Booted by U-Boot booti without FTD data */ 505 /* Find the end - begin */ 506 ldr x7, .Lend 507 sub x8, x7, x6 508 509 /* 510 * Add one 2MiB page for copy of FDT data (maximum FDT size), 511 * one for metadata and round up 512 */ 513 ldr x7, =(3 * L2_SIZE - 1) 514 add x8, x8, x7 515#endif 516 517common: 518#if PAGE_SIZE != PAGE_SIZE_4K 519 /* 520 * Create L3 and L3C pages. The kernel will be loaded at a 2M aligned 521 * address, enabling the creation of L3C pages. However, when the page 522 * size is larger than 4k, L2 blocks are too large to map the kernel 523 * with 2M alignment. 524 */ 525#define PTE_SHIFT L3_SHIFT 526#define BUILD_PTE_FUNC build_l3_page_pagetable 527#else 528#define PTE_SHIFT L2_SHIFT 529#define BUILD_PTE_FUNC build_l2_block_pagetable 530#endif 531 532 /* Get the number of blocks/pages to allocate, rounded down */ 533 lsr x10, x8, #(PTE_SHIFT) 534 535 /* Create the kernel space PTE table */ 536 mov x6, x26 537 mov x7, #(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK)) 538 mov x8, #(KERNBASE) 539 mov x9, x28 540 bl BUILD_PTE_FUNC 541 542#undef PTE_SHIFT 543#undef BUILD_PTE_FUNC 544 545#if PAGE_SIZE != PAGE_SIZE_4K 546 /* Move to the l2 table */ 547 ldr x9, =(PAGE_SIZE * L3_PAGE_COUNT) 548 add x26, x26, x9 549 550 /* Link the l2 -> l3 table */ 551 mov x9, x6 552 mov x6, x26 553 bl link_l2_pagetable 554#endif 555 556 /* Move to the l1 table */ 557 add x26, x26, #PAGE_SIZE 558 559 /* Link the l1 -> l2 table */ 560 mov x9, x6 561 mov x6, x26 562 bl link_l1_pagetable 563 564 /* Move to the l0 table */ 565 add x24, x26, #PAGE_SIZE 566 567 /* Link the l0 -> l1 table */ 568 mov x9, x6 569 mov x6, x24 570 mov x10, #1 571 bl link_l0_pagetable 572 573 /* 574 * Build the TTBR0 maps. As TTBR0 maps, they must specify ATTR_S1_nG. 575 * They are only needed early on, so the VA = PA map is uncached. 576 */ 577 add x27, x24, #PAGE_SIZE 578 579 mov x6, x27 /* The initial page table */ 580 581 /* Create the VA = PA map */ 582 mov x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK)) 583 adrp x16, _start 584 and x16, x16, #(~L2_OFFSET) 585 mov x9, x16 /* PA start */ 586 mov x8, x16 /* VA start (== PA start) */ 587 mov x10, #1 588 bl build_l2_block_pagetable 589 590#if defined(SOCDEV_PA) 591 /* Create a table for the UART */ 592 mov x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_DEVICE)) 593 ldr x9, =(L2_SIZE) 594 add x16, x16, x9 /* VA start */ 595 mov x8, x16 596 597 /* Store the socdev virtual address */ 598 add x17, x8, #(SOCDEV_PA & L2_OFFSET) 599 adrp x9, socdev_va 600 str x17, [x9, :lo12:socdev_va] 601 602 mov x9, #(SOCDEV_PA & ~L2_OFFSET) /* PA start */ 603 mov x10, #1 604 bl build_l2_block_pagetable 605#endif 606 607#if defined(LINUX_BOOT_ABI) 608 /* Map FDT data ? */ 609 cbz x19, 1f 610 611 /* Create the mapping for FDT data (2 MiB max) */ 612 mov x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK)) 613 ldr x9, =(L2_SIZE) 614 add x16, x16, x9 /* VA start */ 615 mov x8, x16 616 mov x9, x0 /* PA start */ 617 /* Update the module pointer to point at the allocated memory */ 618 and x0, x0, #(L2_OFFSET) /* Keep the lower bits */ 619 add x0, x0, x8 /* Add the aligned virtual address */ 620 621 mov x10, #1 622 bl build_l2_block_pagetable 623 6241: 625#endif 626 627 /* Move to the l1 table */ 628 add x27, x27, #PAGE_SIZE 629 630 /* Link the l1 -> l2 table */ 631 mov x9, x6 632 mov x6, x27 633 bl link_l1_pagetable 634 635 /* Move to the l0 table */ 636 add x27, x27, #PAGE_SIZE 637 638 /* Link the l0 -> l1 table */ 639 mov x9, x6 640 mov x6, x27 641 mov x10, #1 642 bl link_l0_pagetable 643 644 /* Restore the Link register */ 645 mov x30, x5 646 ret 647LEND(create_pagetables) 648 649/* 650 * Builds an L0 -> L1 table descriptor 651 * 652 * x6 = L0 table 653 * x8 = Virtual Address 654 * x9 = L1 PA (trashed) 655 * x10 = Entry count (trashed) 656 * x11, x12 and x13 are trashed 657 */ 658LENTRY(link_l0_pagetable) 659 /* 660 * Link an L0 -> L1 table entry. 661 */ 662 /* Find the table index */ 663 lsr x11, x8, #L0_SHIFT 664 and x11, x11, #L0_ADDR_MASK 665 666 /* Build the L0 block entry */ 667 mov x12, #L0_TABLE 668 orr x12, x12, #(TATTR_UXN_TABLE | TATTR_AP_TABLE_NO_EL0) 669 670 /* Only use the output address bits */ 671 lsr x9, x9, #PAGE_SHIFT 6721: orr x13, x12, x9, lsl #PAGE_SHIFT 673 674 /* Store the entry */ 675 str x13, [x6, x11, lsl #3] 676 677 sub x10, x10, #1 678 add x11, x11, #1 679 add x9, x9, #1 680 cbnz x10, 1b 681 682 ret 683LEND(link_l0_pagetable) 684 685/* 686 * Builds an L1 -> L2 table descriptor 687 * 688 * x6 = L1 table 689 * x8 = Virtual Address 690 * x9 = L2 PA (trashed) 691 * x11, x12 and x13 are trashed 692 */ 693LENTRY(link_l1_pagetable) 694 /* 695 * Link an L1 -> L2 table entry. 696 */ 697 /* Find the table index */ 698 lsr x11, x8, #L1_SHIFT 699 and x11, x11, #Ln_ADDR_MASK 700 701 /* Build the L1 block entry */ 702 mov x12, #L1_TABLE 703 704 /* Only use the output address bits */ 705 lsr x9, x9, #PAGE_SHIFT 706 orr x13, x12, x9, lsl #PAGE_SHIFT 707 708 /* Store the entry */ 709 str x13, [x6, x11, lsl #3] 710 711 ret 712LEND(link_l1_pagetable) 713 714/* 715 * Builds count 2 MiB page table entry 716 * x6 = L2 table 717 * x7 = Block attributes 718 * x8 = VA start 719 * x9 = PA start (trashed) 720 * x10 = Entry count (trashed) 721 * x11, x12 and x13 are trashed 722 */ 723LENTRY(build_l2_block_pagetable) 724 /* 725 * Build the L2 table entry. 726 */ 727 /* Find the table index */ 728 lsr x11, x8, #L2_SHIFT 729 and x11, x11, #Ln_ADDR_MASK 730 731 /* Build the L2 block entry */ 732 orr x12, x7, #L2_BLOCK 733 orr x12, x12, #(ATTR_DEFAULT) 734 orr x12, x12, #(ATTR_S1_UXN) 735#ifdef __ARM_FEATURE_BTI_DEFAULT 736 orr x12, x12, #(ATTR_S1_GP) 737#endif 738 739 /* Only use the output address bits */ 740 lsr x9, x9, #L2_SHIFT 741 742 /* Set the physical address for this virtual address */ 7431: orr x13, x12, x9, lsl #L2_SHIFT 744 745 /* Store the entry */ 746 str x13, [x6, x11, lsl #3] 747 748 sub x10, x10, #1 749 add x11, x11, #1 750 add x9, x9, #1 751 cbnz x10, 1b 752 753 ret 754LEND(build_l2_block_pagetable) 755 756#if PAGE_SIZE != PAGE_SIZE_4K 757/* 758 * Builds an L2 -> L3 table descriptor 759 * 760 * x6 = L2 table 761 * x8 = Virtual Address 762 * x9 = L3 PA (trashed) 763 * x11, x12 and x13 are trashed 764 */ 765LENTRY(link_l2_pagetable) 766 /* 767 * Link an L2 -> L3 table entry. 768 */ 769 /* Find the table index */ 770 lsr x11, x8, #L2_SHIFT 771 and x11, x11, #Ln_ADDR_MASK 772 773 /* Build the L1 block entry */ 774 mov x12, #L2_TABLE 775 776 /* Only use the output address bits */ 777 lsr x9, x9, #PAGE_SHIFT 778 orr x13, x12, x9, lsl #PAGE_SHIFT 779 780 /* Store the entry */ 781 str x13, [x6, x11, lsl #3] 782 783 ret 784LEND(link_l2_pagetable) 785 786/* 787 * Builds count level 3 page table entries. Uses ATTR_CONTIGUOUS to create 788 * large page (L3C) mappings when the current VA and remaining count allow 789 * it. 790 * x6 = L3 table 791 * x7 = Block attributes 792 * x8 = VA start 793 * x9 = PA start (trashed) 794 * x10 = Entry count (trashed) 795 * x11, x12 and x13 are trashed 796 * 797 * VA start (x8) modulo L3C_SIZE must equal PA start (x9) modulo L3C_SIZE. 798 */ 799LENTRY(build_l3_page_pagetable) 800 /* 801 * Build the L3 table entry. 802 */ 803 /* Find the table index */ 804 lsr x11, x8, #L3_SHIFT 805 and x11, x11, #Ln_ADDR_MASK 806 807 /* Build the L3 page entry */ 808 orr x12, x7, #L3_PAGE 809 orr x12, x12, #(ATTR_DEFAULT) 810 orr x12, x12, #(ATTR_S1_UXN) 811#ifdef __ARM_FEATURE_BTI_DEFAULT 812 orr x12, x12, #(ATTR_S1_GP) 813#endif 814 815 /* Only use the output address bits */ 816 lsr x9, x9, #L3_SHIFT 817 818 /* Check if an ATTR_CONTIGUOUS mapping is possible */ 8191: tst x11, #(L3C_ENTRIES - 1) 820 b.ne 2f 821 cmp x10, #L3C_ENTRIES 822 b.lo 3f 823 orr x12, x12, #(ATTR_CONTIGUOUS) 824 b 2f 8253: and x12, x12, #(~ATTR_CONTIGUOUS) 826 827 /* Set the physical address for this virtual address */ 8282: orr x13, x12, x9, lsl #L3_SHIFT 829 830 /* Store the entry */ 831 str x13, [x6, x11, lsl #3] 832 833 sub x10, x10, #1 834 add x11, x11, #1 835 add x9, x9, #1 836 cbnz x10, 1b 837 838 ret 839LEND(build_l3_page_pagetable) 840#endif 841 842LENTRY(start_mmu) 843 dsb sy 844 845 /* Load the exception vectors */ 846 ldr x2, =exception_vectors 847 msr vbar_el1, x2 848 849 /* Load ttbr0 and ttbr1 */ 850 msr ttbr0_el1, x27 851 msr ttbr1_el1, x24 852 isb 853 854 /* Clear the Monitor Debug System control register */ 855 msr mdscr_el1, xzr 856 857 /* Invalidate the TLB */ 858 tlbi vmalle1is 859 dsb ish 860 isb 861 862 ldr x2, mair 863 msr mair_el1, x2 864 865 /* 866 * Setup TCR according to the PARange and ASIDBits fields 867 * from ID_AA64MMFR0_EL1 and the HAFDBS field from the 868 * ID_AA64MMFR1_EL1. More precisely, set TCR_EL1.AS 869 * to 1 only if the ASIDBits field equals 0b0010. 870 */ 871 ldr x2, tcr 872 mrs x3, id_aa64mmfr0_el1 873 874 /* Copy the bottom 3 bits from id_aa64mmfr0_el1 into TCR.IPS */ 875 bfi x2, x3, #(TCR_IPS_SHIFT), #(TCR_IPS_WIDTH) 876 and x3, x3, #(ID_AA64MMFR0_ASIDBits_MASK) 877 878 /* Check if the HW supports 16 bit ASIDS */ 879 cmp x3, #(ID_AA64MMFR0_ASIDBits_16) 880 /* If so x3 == 1, else x3 == 0 */ 881 cset x3, eq 882 /* Set TCR.AS with x3 */ 883 bfi x2, x3, #(TCR_ASID_SHIFT), #(TCR_ASID_WIDTH) 884 885 /* 886 * Check if the HW supports access flag and dirty state updates, 887 * and set TCR_EL1.HA and TCR_EL1.HD accordingly. 888 */ 889 mrs x3, id_aa64mmfr1_el1 890 and x3, x3, #(ID_AA64MMFR1_HAFDBS_MASK) 891 cmp x3, #1 892 b.ne 1f 893 orr x2, x2, #(TCR_HA) 894 b 2f 8951: 896 cmp x3, #2 897 b.ne 2f 898 orr x2, x2, #(TCR_HA | TCR_HD) 8992: 900 msr tcr_el1, x2 901 902 /* 903 * Setup SCTLR. 904 */ 905 ldr x2, sctlr_set 906 ldr x3, sctlr_clear 907 mrs x1, sctlr_el1 908 bic x1, x1, x3 /* Clear the required bits */ 909 orr x1, x1, x2 /* Set the required bits */ 910 msr sctlr_el1, x1 911 isb 912 913 ret 914 915 .align 3 916mair: 917 .quad MAIR_ATTR(MAIR_DEVICE_nGnRnE, VM_MEMATTR_DEVICE_nGnRnE) | \ 918 MAIR_ATTR(MAIR_NORMAL_NC, VM_MEMATTR_UNCACHEABLE) | \ 919 MAIR_ATTR(MAIR_NORMAL_WB, VM_MEMATTR_WRITE_BACK) | \ 920 MAIR_ATTR(MAIR_NORMAL_WT, VM_MEMATTR_WRITE_THROUGH) | \ 921 MAIR_ATTR(MAIR_DEVICE_nGnRE, VM_MEMATTR_DEVICE_nGnRE) 922tcr: 923#if PAGE_SIZE == PAGE_SIZE_4K 924#define TCR_TG (TCR_TG1_4K | TCR_TG0_4K) 925#elif PAGE_SIZE == PAGE_SIZE_16K 926#define TCR_TG (TCR_TG1_16K | TCR_TG0_16K) 927#else 928#error Unsupported page size 929#endif 930 931 .quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG | \ 932 TCR_CACHE_ATTRS | TCR_SMP_ATTRS) 933sctlr_set: 934 /* Bits to set */ 935 .quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN | \ 936 SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \ 937 SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C | \ 938 SCTLR_M | SCTLR_CP15BEN | SCTLR_BT1 | SCTLR_BT0) 939sctlr_clear: 940 /* Bits to clear */ 941 .quad (SCTLR_EE | SCTLR_E0E | SCTLR_IESB | SCTLR_WXN | SCTLR_UMA | \ 942 SCTLR_ITD | SCTLR_A) 943LEND(start_mmu) 944 945ENTRY(abort) 946 b abort 947END(abort) 948 949.bss 950 .align PAGE_SHIFT 951initstack: 952 .space BOOT_STACK_SIZE 953initstack_end: 954 955 .section .init_pagetable, "aw", %nobits 956 .align PAGE_SHIFT 957 /* 958 * 6 initial tables (in the following order): 959 * L2 for kernel (High addresses) 960 * L1 for kernel 961 * L0 for kernel 962 * L1 bootstrap for user (Low addresses) 963 * L0 bootstrap for user 964 * L0 for user 965 */ 966 .globl pagetable_l0_ttbr1 967pagetable: 968#if PAGE_SIZE != PAGE_SIZE_4K 969 .space (PAGE_SIZE * L3_PAGE_COUNT) 970pagetable_l2_ttbr1: 971#endif 972 .space PAGE_SIZE 973pagetable_l1_ttbr1: 974 .space PAGE_SIZE 975pagetable_l0_ttbr1: 976 .space PAGE_SIZE 977pagetable_l2_ttbr0_bootstrap: 978 .space PAGE_SIZE 979pagetable_l1_ttbr0_bootstrap: 980 .space PAGE_SIZE 981pagetable_l0_ttbr0_bootstrap: 982 .space PAGE_SIZE 983pagetable_l0_ttbr0: 984 .space PAGE_SIZE 985pagetable_end: 986 987el2_pagetable: 988 .space PAGE_SIZE 989 990 .section .rodata, "a", %progbits 991 .globl aarch32_sigcode 992 .align 2 993aarch32_sigcode: 994 .word 0xe1a0000d // mov r0, sp 995 .word 0xe2800040 // add r0, r0, #SIGF_UC 996 .word 0xe59f700c // ldr r7, [pc, #12] 997 .word 0xef000000 // swi #0 998 .word 0xe59f7008 // ldr r7, [pc, #8] 999 .word 0xef000000 // swi #0 1000 .word 0xeafffffa // b . - 16 1001 .word SYS_sigreturn 1002 .word SYS_exit 1003 .align 3 1004 .size aarch32_sigcode, . - aarch32_sigcode 1005aarch32_esigcode: 1006 .data 1007 .global sz_aarch32_sigcode 1008sz_aarch32_sigcode: 1009 .quad aarch32_esigcode - aarch32_sigcode 1010 1011GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL) 1012