/*-
 * Copyright (c) 2012-2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "assym.inc"
#include "opt_kstack_pages.h"
#include <sys/elf_common.h>
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/hypervisor.h>
#include <machine/param.h>
#include <machine/pte.h>
#include <machine/vm.h>
#include <machine/vmparam.h>

#define	VIRT_BITS	48

#if PAGE_SIZE == PAGE_SIZE_16K
/*
 * The number of level 3 tables to create. 32 will allow for 1G of address
 * space, the same as a single level 2 table with 4k pages.
 */
#define	L3_PAGE_COUNT	32
#endif

/*
 * The size of our bootstrap stack.
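 * It is allocated at initstack in the .bss below; the boot code leaves
 * room for a PCB at the top when it sets the initial stack pointer.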
 */
#define	BOOT_STACK_SIZE	(KSTACK_PAGES * PAGE_SIZE)

	.globl	kernbase
	.set	kernbase, KERNBASE

/*
 * We assume:
 *  MMU      on with an identity map, or off
 *  D-Cache: off
 *  I-Cache: on or off
 *  We are loaded at a 2MiB aligned address
 */

ENTRY(_start)
	/* Enter the kernel exception level */
	bl	enter_kernel_el

	/* Set the context id */
	msr	contextidr_el1, xzr

	/* Get the virt -> phys offset */
	bl	get_load_phys_addr

	/*
	 * At this point:
	 * x28 = Our physical load address
	 */

	/* Create the page tables */
	bl	create_pagetables

	/*
	 * At this point:
	 * x27 = TTBR0 table
	 * x26 = Kernel L1 table
	 * x24 = TTBR1 table
	 * x22 = PTE shareability attributes
	 * x21 = BTI guarded page attribute if supported
	 */

	/* Enable the mmu */
	bl	start_mmu

	/* Load the new ttbr0 pagetable */
	adrp	x27, pagetable_l0_ttbr0
	add	x27, x27, :lo12:pagetable_l0_ttbr0

	/* Jump to the virtual address space */
	ldr	x15, .Lvirtdone
	br	x15

virtdone:
	BTI_J

	/* Set up the stack */
	adrp	x25, initstack_end
	add	x25, x25, :lo12:initstack_end
	sub	sp, x25, #PCB_SIZE

	/* Zero the BSS */
	ldr	x15, .Lbss
	ldr	x14, .Lend
1:
	stp	xzr, xzr, [x15], #16
	cmp	x15, x14
	b.lo	1b

#if defined(PERTHREAD_SSP)
	/* Set sp_el0 to the boot canary for early per-thread SSP to work */
	adrp	x15, boot_canary
	add	x15, x15, :lo12:boot_canary
	msr	sp_el0, x15
#endif

	/* Back up the module pointer */
	mov	x1, x0

	sub	sp, sp, #BOOTPARAMS_SIZE
	mov	x0, sp

	str	x1, [x0, #BP_MODULEP]
	adrp	x25, initstack
	add	x25, x25, :lo12:initstack
	str	x25, [x0, #BP_KERN_STACK]
	str	x27, [x0, #BP_KERN_TTBR0]
	str	x23, [x0, #BP_BOOT_EL]

	/* Set these before they are used in kasan_init_early */
	adrp	x1, pmap_sh_attr
	str	x22, [x1, :lo12:pmap_sh_attr]
#ifdef __ARM_FEATURE_BTI_DEFAULT
	adrp	x1, pmap_gp_attr
	str	x21, [x1, :lo12:pmap_gp_attr]
#endif

#ifdef KASAN
	/* Save bootparams */
	mov	x19, x0

	/* Bootstrap an early shadow map for the boot stack. */
	ldr	x0, [x0, #BP_KERN_STACK]
	ldr	x1, =BOOT_STACK_SIZE
	bl	kasan_init_early

	/* Restore bootparams */
	mov	x0, x19
#endif

	/* The traceback starts here */
	mov	fp, #0
	/* Branch to C code */
	bl	initarm
	/* We are done with the boot params */
	add	sp, sp, #BOOTPARAMS_SIZE

	/*
	 * Enable pointer authentication in the kernel. We set the keys for
	 * thread0 in initarm so have to wait until it returns to enable it.
	 * If we were to enable it in initarm then any authentication when
	 * returning would fail as it was called with pointer authentication
	 * disabled.
	 */
	bl	ptrauth_start

	bl	mi_startup

	/* We should not get here */
	brk	0

	.align 3
.Lvirtdone:
	.quad	virtdone
.Lbss:
	.quad	__bss_start
.Lend:
	.quad	__bss_end
END(_start)

#ifdef SMP
/*
 * void
 * mpentry_psci(unsigned long)
 *
 * Called by a core when it is being brought online with psci.
 * The data in x0 is passed straight to init_secondary.
 */
ENTRY(mpentry_psci)
	mov	x26, xzr
	b	mpentry_common
END(mpentry_psci)

/*
 * void
 * mpentry_spintable(void)
 *
 * Called by a core when it is being brought online with a spin-table.
 * Reads the new CPU ID and passes this to init_secondary.
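 * With the spin-table method every CPU is released through the same
 * address, so spintable_wait below spins until the kernel stores this
 * CPU's ID in ap_cpuid.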
 */
ENTRY(mpentry_spintable)
	ldr	x26, =spintable_wait
	b	mpentry_common
END(mpentry_spintable)

/* Wait for the current CPU to be released */
LENTRY(spintable_wait)
	/* Read the affinity bits from mpidr_el1 */
	mrs	x1, mpidr_el1
	ldr	x2, =CPU_AFF_MASK
	and	x1, x1, x2

	adrp	x2, ap_cpuid
1:
	ldr	x0, [x2, :lo12:ap_cpuid]
	cmp	x0, x1
	b.ne	1b

	str	xzr, [x2, :lo12:ap_cpuid]
	dsb	sy
	sev

	ret
LEND(spintable_wait)

LENTRY(mpentry_common)
	/* Disable interrupts */
	msr	daifset, #DAIF_INTR

	/* Enter the kernel exception level */
	bl	enter_kernel_el

	/* Set the context id */
	msr	contextidr_el1, xzr

	/* Load the kernel page table */
	adrp	x24, pagetable_l0_ttbr1
	add	x24, x24, :lo12:pagetable_l0_ttbr1
	/* Load the identity page table */
	adrp	x27, pagetable_l0_ttbr0_bootstrap
	add	x27, x27, :lo12:pagetable_l0_ttbr0_bootstrap

	/* Enable the mmu */
	bl	start_mmu

	/* Load the new ttbr0 pagetable */
	adrp	x27, pagetable_l0_ttbr0
	add	x27, x27, :lo12:pagetable_l0_ttbr0

	/* Jump to the virtual address space */
	ldr	x15, =mp_virtdone
	br	x15

mp_virtdone:
	BTI_J

	/*
	 * Allow this CPU to wait until the kernel is ready for it,
	 * e.g. with the spin-table method, where each CPU uses the same
	 * release address.
	 */
	cbz	x26, 1f
	blr	x26
1:

	/* Start using the AP boot stack */
	adrp	x4, bootstack
	ldr	x4, [x4, :lo12:bootstack]
	mov	sp, x4

#if defined(PERTHREAD_SSP)
	/* Set sp_el0 to the boot canary for early per-thread SSP to work */
	adrp	x15, boot_canary
	add	x15, x15, :lo12:boot_canary
	msr	sp_el0, x15
#endif

	/* Load the kernel ttbr0 pagetable */
	msr	ttbr0_el1, x27
	isb

	/* Invalidate the TLB */
	tlbi	vmalle1
	dsb	sy
	isb

	/*
	 * Initialize the per-CPU pointer before calling into C code, for the
	 * benefit of kernel sanitizers.
	 */
	adrp	x18, bootpcpu
	ldr	x18, [x18, :lo12:bootpcpu]
	msr	tpidr_el1, x18

	b	init_secondary
LEND(mpentry_common)
#endif

/*
 * Enter the exception level the kernel will use:
 *
 *  - If in EL1 continue in EL1
 *  - If the CPU supports FEAT_VHE then set HCR_E2H and HCR_TGE and continue
 *    in EL2
 *  - Configure EL2 to support running the kernel at EL1 and exit to it
 */
LENTRY(enter_kernel_el)
#define	INIT_SCTLR_EL1	(SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_EIS | \
    SCTLR_TSCXT | SCTLR_EOS)
	mrs	x23, CurrentEL
	and	x23, x23, #(CURRENTEL_EL_MASK)
	cmp	x23, #(CURRENTEL_EL_EL2)
	b.eq	1f

	ldr	x2, =INIT_SCTLR_EL1
	msr	sctlr_el1, x2
	/*
	 * SCTLR_EOS is set so eret is a context synchronizing event, so we
	 * need an isb here to ensure it's observed by later instructions,
	 * but don't need one after the eret below.
	 */
	isb

	/*
	 * Ensure SPSR_EL1 and pstate are in sync. The only way to set the
	 * latter is to set the former and return from an exception with
	 * eret.
	 */
	mov	x2, #(PSR_DAIF | PSR_M_EL1h)
	msr	spsr_el1, x2
	msr	elr_el1, lr
	eret

1:
	dsb	sy
	/*
	 * Set just the reserved bits in sctlr_el2. This will disable the
	 * MMU, which if left enabled could break the kernel when it
	 * continues running in EL2, e.g. when using VHE.
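	 * SCTLR_EL2_EIS and SCTLR_EL2_EOS are also set so that exception
	 * entry to, and exception return from, EL2 are context
	 * synchronizing events.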
	 */
	ldr	x2, =(SCTLR_EL2_RES1 | SCTLR_EL2_EIS | SCTLR_EL2_EOS)
	msr	sctlr_el2, x2
	isb

	/* Configure the Hypervisor */
	ldr	x2, =(HCR_RW | HCR_APK | HCR_API | HCR_E2H)
	msr	hcr_el2, x2

	/* Stash value of HCR_EL2 for later */
	isb
	mrs	x4, hcr_el2

	/* Load the Virtualization Process ID Register */
	mrs	x2, midr_el1
	msr	vpidr_el2, x2

	/* Load the Virtualization Multiprocess ID Register */
	mrs	x2, mpidr_el1
	msr	vmpidr_el2, x2

	/* Set the initial sctlr_el1 */
	ldr	x2, =INIT_SCTLR_EL1
	msr	sctlr_el1, x2

	/* Check if the E2H flag is set */
	tst	x4, #HCR_E2H
	b.eq	.Lno_vhe

	/*
	 * The kernel will be running in EL2, route exceptions here rather
	 * than to EL1.
	 */
	orr	x4, x4, #(HCR_TGE)
	msr	hcr_el2, x4
	isb

	msr	SCTLR_EL12_REG, x2
	mov	x2, xzr /* CPTR_EL2 is managed by vfp.c */
	ldr	x3, =(CNTHCTL_E2H_EL1PCTEN | CNTHCTL_E2H_EL1PTEN)
	ldr	x5, =(PSR_DAIF | PSR_M_EL2h)
	b	.Ldone_vhe

.Lno_vhe:
	/* Hypervisor trap functions */
	adrp	x2, hyp_stub_vectors
	add	x2, x2, :lo12:hyp_stub_vectors
	msr	vbar_el2, x2

	ldr	x2, =(CPTR_RES1)
	ldr	x3, =(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
	ldr	x5, =(PSR_DAIF | PSR_M_EL1h)

.Ldone_vhe:

	msr	cptr_el2, x2
	/* Enable access to the physical timers at EL1 */
	msr	cnthctl_el2, x3
	/* Set the return PSTATE */
	msr	spsr_el2, x5

	/*
	 * Configure the Extended Hypervisor register. This is only valid if
	 * FEAT_HCX is implemented.
	 */
	CHECK_CPU_FEAT(x2, ID_AA64MMFR1, HCX, 2f)
	/* Extended Hypervisor Configuration */
	mov	x2, xzr
	msr	HCRX_EL2_REG, x2
	isb
2:

	/* Don't trap CP15 accesses to EL2 */
	msr	hstr_el2, xzr

	/* Set the counter offset to a known value */
	msr	cntvoff_el2, xzr

	/* Zero vttbr_el2 so a hypervisor can tell the host and guest apart */
	msr	vttbr_el2, xzr

	/* Check if the CPU supports a GIC, and configure the CPU interface */
	CHECK_CPU_FEAT(x2, ID_AA64PFR0, GIC, 3f)

	mrs	x2, icc_sre_el2
	orr	x2, x2, #ICC_SRE_EL2_EN	/* Enable access from Non-secure EL1 */
	orr	x2, x2, #ICC_SRE_EL2_SRE	/* Enable system registers */
	msr	icc_sre_el2, x2
3:

	/* Set the exception return address to our return address */
	msr	elr_el2, x30
	isb

	eret
#undef INIT_SCTLR_EL1
LEND(enter_kernel_el)

/*
 * Get the physical address the kernel was loaded at.
 */
LENTRY(get_load_phys_addr)
	/* Load the offset of get_load_phys_addr from KERNBASE */
	ldr	x28, =(get_load_phys_addr - KERNBASE)
	/* Load the physical address of get_load_phys_addr */
	adr	x29, get_load_phys_addr
	/* Find the physical address of KERNBASE, i.e. our load address */
	sub	x28, x29, x28
	ret
LEND(get_load_phys_addr)

/*
 * This builds the page tables containing the identity map, and the kernel
 * virtual map.
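 *
 * On return:
 *  x24 = The TTBR1 L0 table
 *  x26 = The kernel L1 table
 *  x27 = The bootstrap TTBR0 L0 table
 *  x22 = The PTE shareability attributes
 *  x21 = The BTI guarded page attribute, if supported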
 *
 * It relies on:
 *  We were loaded to an address that is on a 2MiB boundary
 *  The memory in use must not cross a 1GiB boundary
 *  x28 contains the physical address we were loaded from
 *
 * There are 7 or 8 pages before that address for the page tables.
 * The pages used are:
 *  - The Kernel L3 tables (only for 16k kernel)
 *  - The Kernel L2 table
 *  - The Kernel L1 table
 *  - The Kernel L0 table (TTBR1)
 *  - The identity (PA = VA) L2 table
 *  - The identity (PA = VA) L1 table
 *  - The identity (PA = VA) L0 table (Early TTBR0)
 *  - The Kernel empty L0 table (Late TTBR0)
 */
LENTRY(create_pagetables)
	/* Save the Link register */
	mov	x5, x30

	/* Clean the page table */
	adrp	x6, pagetable
	add	x6, x6, :lo12:pagetable
	mov	x26, x6
	adrp	x27, pagetable_end
	add	x27, x27, :lo12:pagetable_end
1:
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	cmp	x6, x27
	b.lo	1b

#ifdef __ARM_FEATURE_BTI_DEFAULT
	/*
	 * Check if the CPU supports BTI
	 */
	mrs	x6, id_aa64pfr1_el1		/* Read the ID register */
	and	x6, x6, ID_AA64PFR1_BT_MASK	/* Mask the field we need */
	cmp	x6, xzr			/* Check it's non-zero */
	cset	x6, ne			/* x6 is set if non-zero */
	lsl	x21, x6, ATTR_S1_GP_SHIFT	/* Shift to the correct bit */
#endif

	/*
	 * Find the shareability attribute we should use. If FEAT_LPA2 is
	 * enabled then the shareability field is moved from the page table
	 * to tcr_el1 and the bits in the page table are reused by the
	 * address field.
	 */
#if PAGE_SIZE == PAGE_SIZE_4K
#define	LPA2_MASK	ID_AA64MMFR0_TGran4_MASK
#define	LPA2_VAL	ID_AA64MMFR0_TGran4_LPA2
#elif PAGE_SIZE == PAGE_SIZE_16K
#define	LPA2_MASK	ID_AA64MMFR0_TGran16_MASK
#define	LPA2_VAL	ID_AA64MMFR0_TGran16_LPA2
#else
#error Unsupported page size
#endif
	mrs	x6, id_aa64mmfr0_el1
	mov	x7, LPA2_VAL
	and	x6, x6, LPA2_MASK
	cmp	x6, x7
	ldr	x22, =(ATTR_SH(ATTR_SH_IS))
	csel	x22, xzr, x22, eq
#undef LPA2_MASK
#undef LPA2_VAL

	/*
	 * Build the TTBR1 maps.
	 */

	/* Find the size of the kernel */
	mov	x6, #(KERNBASE)

#if defined(LINUX_BOOT_ABI)
	/* x19 is used as the 'map FDT data' flag */
	mov	x19, xzr

	/* No modules or FDT pointer? */
	cbz	x0, booti_no_fdt

	/*
	 * Test if x0 points to a modules descriptor (virtual address) or
	 * to FDT (physical address)
	 */
	cmp	x0, x6		/* x6 is #(KERNBASE) */
	b.lo	booti_fdt
#endif

	/* Booted with a modules pointer */
	/* Find modulep - begin */
	sub	x8, x0, x6
	/*
	 * Add space for the module data. When PAGE_SIZE is 4k this will
	 * add at least 2 level 2 blocks (2 * 2MiB). When PAGE_SIZE is
	 * larger the space is at least as large, as we use smaller level 3
	 * pages.
	 */
	ldr	x7, =((6 * 1024 * 1024) - 1)
	add	x8, x8, x7
	b	common

#if defined(LINUX_BOOT_ABI)
booti_fdt:
	/* Booted by U-Boot booti with FDT data */
	/* Set the 'map FDT data' flag */
	mov	x19, #1

booti_no_fdt:
	/* Booted by U-Boot booti without FDT data */
	/* Find the end - begin */
	ldr	x7, .Lend
	sub	x8, x7, x6

	/*
	 * Add one 2MiB page for a copy of the FDT data (maximum FDT size),
	 * one for metadata, and round up
	 */
	ldr	x7, =(3 * L2_SIZE - 1)
	add	x8, x8, x7
#endif

common:
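	/* x8 is now the size of the region to map, including padding */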
#if PAGE_SIZE != PAGE_SIZE_4K
	/*
	 * Create L3 and L3C pages. The kernel will be loaded at a 2M aligned
	 * address, enabling the creation of L3C pages. However, when the page
	 * size is larger than 4k, L2 blocks are too large to map the kernel
	 * with 2M alignment.
	 */
#define	PTE_SHIFT	L3_SHIFT
#define	BUILD_PTE_FUNC	build_l3_page_pagetable
#else
#define	PTE_SHIFT	L2_SHIFT
#define	BUILD_PTE_FUNC	build_l2_block_pagetable
#endif

	/* Get the number of blocks/pages to allocate, rounded down */
	lsr	x10, x8, #(PTE_SHIFT)

	/* Create the kernel space PTE table */
	mov	x6, x26
	mov	x7, #(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	mov	x8, #(KERNBASE)
	mov	x9, x28
	bl	BUILD_PTE_FUNC

#undef PTE_SHIFT
#undef BUILD_PTE_FUNC

#if PAGE_SIZE != PAGE_SIZE_4K
	/* Move to the l2 table */
	ldr	x9, =(PAGE_SIZE * L3_PAGE_COUNT)
	add	x26, x26, x9

	/* Link the l2 -> l3 table */
	mov	x9, x6
	mov	x6, x26
	bl	link_l2_pagetable
#endif

	/* Move to the l1 table */
	add	x26, x26, #PAGE_SIZE

	/* Link the l1 -> l2 table */
	mov	x9, x6
	mov	x6, x26
	bl	link_l1_pagetable

	/* Move to the l0 table */
	add	x24, x26, #PAGE_SIZE

	/* Link the l0 -> l1 table */
	mov	x9, x6
	mov	x6, x24
	mov	x10, #1
	bl	link_l0_pagetable

	/*
	 * Build the TTBR0 maps. As TTBR0 maps, they must specify ATTR_S1_nG.
	 * They are only needed early on.
	 */
	add	x27, x24, #PAGE_SIZE

	mov	x6, x27		/* The initial page table */

	/* Create the VA = PA map */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	adrp	x16, _start
	and	x16, x16, #(~L2_OFFSET)
	mov	x9, x16		/* PA start */
	mov	x8, x16		/* VA start (== PA start) */
	mov	x10, #1
	bl	build_l2_block_pagetable

#if defined(SOCDEV_PA)
	/* Create a table for the UART */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_DEVICE))
	ldr	x9, =(L2_SIZE)
	add	x16, x16, x9	/* VA start */
	mov	x8, x16

	/* Store the socdev virtual address */
	add	x17, x8, #(SOCDEV_PA & L2_OFFSET)
	adrp	x9, socdev_va
	str	x17, [x9, :lo12:socdev_va]

	mov	x9, #(SOCDEV_PA & ~L2_OFFSET)	/* PA start */
	mov	x10, #1
	bl	build_l2_block_pagetable
#endif

#if defined(LINUX_BOOT_ABI)
	/* Map FDT data? */
	cbz	x19, 1f

	/* Create the mapping for FDT data (2 MiB max) */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	ldr	x9, =(L2_SIZE)
	add	x16, x16, x9	/* VA start */
	mov	x8, x16
	mov	x9, x0			/* PA start */
	/* Update the module pointer to point at the allocated memory */
	and	x0, x0, #(L2_OFFSET)	/* Keep the lower bits */
	add	x0, x0, x8		/* Add the aligned virtual address */

	mov	x10, #1
	bl	build_l2_block_pagetable

1:
#endif

	/* Move to the l1 table */
	add	x27, x27, #PAGE_SIZE

	/* Link the l1 -> l2 table */
	mov	x9, x6
	mov	x6, x27
	bl	link_l1_pagetable

	/* Move to the l0 table */
	add	x27, x27, #PAGE_SIZE

	/* Link the l0 -> l1 table */
	mov	x9, x6
	mov	x6, x27
	mov	x10, #1
	bl	link_l0_pagetable

	/* Restore the Link register */
	mov	x30, x5
	ret
LEND(create_pagetables)

/*
 * Builds an L0 -> L1 table descriptor
 *
 * x6  = L0 table
 * x8  = Virtual Address
 * x9  = L1 PA (trashed)
 * x10 = Entry count (trashed)
 * x11, x12 and x13 are trashed
 */
LENTRY(link_l0_pagetable)
	/*
	 * Link an L0 -> L1 table entry.
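	 * The loop below writes x10 consecutive entries, advancing the L1
	 * table physical address by one page per entry.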
	 */
	/* Find the table index */
	lsr	x11, x8, #L0_SHIFT
	and	x11, x11, #L0_ADDR_MASK

	/* Build the L0 table entry */
	mov	x12, #L0_TABLE
	orr	x12, x12, #(TATTR_UXN_TABLE | TATTR_AP_TABLE_NO_EL0)

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
1:	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(link_l0_pagetable)

/*
 * Builds an L1 -> L2 table descriptor
 *
 * x6  = L1 table
 * x8  = Virtual Address
 * x9  = L2 PA (trashed)
 * x11, x12 and x13 are trashed
 */
LENTRY(link_l1_pagetable)
	/*
	 * Link an L1 -> L2 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L1_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L1 table entry */
	mov	x12, #L1_TABLE

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	ret
LEND(link_l1_pagetable)

/*
 * Builds count 2 MiB page table entries
 *
 * x6  = L2 table
 * x7  = Block attributes
 * x8  = VA start
 * x9  = PA start (trashed)
 * x10 = Entry count (trashed)
 * x11, x12 and x13 are trashed
 */
LENTRY(build_l2_block_pagetable)
	/*
	 * Build the L2 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L2_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L2 block entry */
	orr	x12, x7, #L2_BLOCK
	orr	x12, x12, #(ATTR_AF)
	orr	x12, x12, #(ATTR_S1_UXN)
#ifdef __ARM_FEATURE_BTI_DEFAULT
	orr	x12, x12, x21
#endif
	/* Set the shareability attribute */
	orr	x12, x12, x22

	/* Only use the output address bits */
	lsr	x9, x9, #L2_SHIFT

	/* Set the physical address for this virtual address */
1:	orr	x13, x12, x9, lsl #L2_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(build_l2_block_pagetable)

#if PAGE_SIZE != PAGE_SIZE_4K
/*
 * Builds an L2 -> L3 table descriptor
 *
 * x6  = L2 table
 * x8  = Virtual Address
 * x9  = L3 PA (trashed)
 * x11, x12 and x13 are trashed
 */
LENTRY(link_l2_pagetable)
	/*
	 * Link an L2 -> L3 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L2_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L2 table entry */
	mov	x12, #L2_TABLE

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	ret
LEND(link_l2_pagetable)

/*
 * Builds count level 3 page table entries. Uses ATTR_CONTIGUOUS to create
 * large page (L3C) mappings when the current VA and remaining count allow
 * it.
 *
 * x6  = L3 table
 * x7  = Block attributes
 * x8  = VA start
 * x9  = PA start (trashed)
 * x10 = Entry count (trashed)
 * x11, x12 and x13 are trashed
 *
 * VA start (x8) modulo L3C_SIZE must equal PA start (x9) modulo L3C_SIZE.
 */
LENTRY(build_l3_page_pagetable)
	/*
	 * Build the L3 table entry.
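	 * ATTR_CONTIGUOUS is set on each aligned block of L3C_ENTRIES
	 * entries so the TLB may cache the whole range as a single entry.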
	 */
	/* Find the table index */
	lsr	x11, x8, #L3_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L3 page entry */
	orr	x12, x7, #L3_PAGE
	orr	x12, x12, #(ATTR_AF)
	orr	x12, x12, #(ATTR_S1_UXN)
#ifdef __ARM_FEATURE_BTI_DEFAULT
	orr	x12, x12, x21
#endif
	/* Set the shareability attribute */
	orr	x12, x12, x22

	/* Only use the output address bits */
	lsr	x9, x9, #L3_SHIFT

	/* Check if an ATTR_CONTIGUOUS mapping is possible */
1:	tst	x11, #(L3C_ENTRIES - 1)
	b.ne	2f
	cmp	x10, #L3C_ENTRIES
	b.lo	3f
	orr	x12, x12, #(ATTR_CONTIGUOUS)
	b	2f
3:	and	x12, x12, #(~ATTR_CONTIGUOUS)

	/* Set the physical address for this virtual address */
2:	orr	x13, x12, x9, lsl #L3_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(build_l3_page_pagetable)
#endif

LENTRY(start_mmu)
	dsb	sy

	/* Load the exception vectors */
	ldr	x2, =exception_vectors
	msr	vbar_el1, x2

	/* Load ttbr0 and ttbr1 */
	msr	ttbr0_el1, x27
	msr	ttbr1_el1, x24
	isb

	/* Clear the Monitor Debug System control register */
	msr	mdscr_el1, xzr

	/* Invalidate the TLB */
	tlbi	vmalle1is
	dsb	ish
	isb

	ldr	x2, mair
	msr	mair_el1, x2

	/*
	 * Setup TCR according to the PARange and ASIDBits fields
	 * from ID_AA64MMFR0_EL1 and the HAFDBS field from
	 * ID_AA64MMFR1_EL1. More precisely, set TCR_EL1.AS
	 * to 1 only if the ASIDBits field equals 0b0010.
	 */
	ldr	x2, tcr

	/* If x22 contains a non-zero value then LPA2 is not implemented */
	cbnz	x22, .Lno_lpa2
	ldr	x3, =(TCR_DS)
	orr	x2, x2, x3
.Lno_lpa2:

	mrs	x3, id_aa64mmfr0_el1

	/* Copy the bottom 3 bits from id_aa64mmfr0_el1 into TCR.IPS */
	bfi	x2, x3, #(TCR_IPS_SHIFT), #(TCR_IPS_WIDTH)
	and	x3, x3, #(ID_AA64MMFR0_ASIDBits_MASK)

	/* Check if the HW supports 16 bit ASIDs */
	cmp	x3, #(ID_AA64MMFR0_ASIDBits_16)
	/* If so x3 == 1, else x3 == 0 */
	cset	x3, eq
	/* Set TCR.AS with x3 */
	bfi	x2, x3, #(TCR_ASID_SHIFT), #(TCR_ASID_WIDTH)

	/*
	 * Check if the HW supports access flag updates, and set
	 * TCR_EL1.HA accordingly. The TCR_EL1.HD flag to enable
	 * HW management of dirty state is set in C code as it may
	 * need to be disabled because of CPU errata.
	 */
	CHECK_CPU_FEAT(x3, ID_AA64MMFR1, HAFDBS, 1f)
	orr	x2, x2, #(TCR_HA)
1:

	msr	tcr_el1, x2

	/*
	 * Setup SCTLR.
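	 * This enables the MMU and the instruction and data caches; the
	 * bits to change are listed at sctlr_set and sctlr_clear below.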
	 */
	ldr	x2, sctlr_set
	ldr	x3, sctlr_clear
	mrs	x1, sctlr_el1
	bic	x1, x1, x3	/* Clear the required bits */
	orr	x1, x1, x2	/* Set the required bits */
	msr	sctlr_el1, x1
	isb

	ret

	.align 3
mair:
	.quad	MAIR_ATTR(MAIR_DEVICE_nGnRnE, VM_MEMATTR_DEVICE_nGnRnE) | \
		MAIR_ATTR(MAIR_NORMAL_NC, VM_MEMATTR_UNCACHEABLE) | \
		MAIR_ATTR(MAIR_NORMAL_WB, VM_MEMATTR_WRITE_BACK) | \
		MAIR_ATTR(MAIR_NORMAL_WT, VM_MEMATTR_WRITE_THROUGH) | \
		MAIR_ATTR(MAIR_DEVICE_nGnRE, VM_MEMATTR_DEVICE_nGnRE)
tcr:
#if PAGE_SIZE == PAGE_SIZE_4K
#define	TCR_TG	(TCR_TG1_4K | TCR_TG0_4K)
#elif PAGE_SIZE == PAGE_SIZE_16K
#define	TCR_TG	(TCR_TG1_16K | TCR_TG0_16K)
#else
#error Unsupported page size
#endif

	.quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG | \
	    TCR_SH1_IS | TCR_ORGN1_WBWA | TCR_IRGN1_WBWA | \
	    TCR_SH0_IS | TCR_ORGN0_WBWA | TCR_IRGN0_WBWA)
sctlr_set:
	/* Bits to set */
	.quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN | \
	    SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \
	    SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C | \
	    SCTLR_M | SCTLR_CP15BEN | SCTLR_BT1 | SCTLR_BT0)
sctlr_clear:
	/* Bits to clear */
	.quad (SCTLR_EE | SCTLR_E0E | SCTLR_IESB | SCTLR_WXN | SCTLR_UMA | \
	    SCTLR_ITD | SCTLR_A)
LEND(start_mmu)

ENTRY(abort)
	b	abort
END(abort)

.bss
	.align	PAGE_SHIFT
initstack:
	.space	BOOT_STACK_SIZE
initstack_end:

	.section .init_pagetable, "aw", %nobits
	.align PAGE_SHIFT
	/*
	 * 7 initial tables (in the following order), preceded by the
	 * kernel L3 tables when using 16k pages:
	 *  L2 for kernel (High addresses)
	 *  L1 for kernel
	 *  L0 for kernel
	 *  L2 bootstrap for user (Low addresses)
	 *  L1 bootstrap for user
	 *  L0 bootstrap for user
	 *  L0 for user
	 */
	.globl pagetable_l0_ttbr1
pagetable:
#if PAGE_SIZE != PAGE_SIZE_4K
	.space	(PAGE_SIZE * L3_PAGE_COUNT)
pagetable_l2_ttbr1:
#endif
	.space	PAGE_SIZE
pagetable_l1_ttbr1:
	.space	PAGE_SIZE
pagetable_l0_ttbr1:
	.space	PAGE_SIZE
pagetable_l2_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l1_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l0_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l0_ttbr0:
	.space	PAGE_SIZE
pagetable_end:

el2_pagetable:
	.space	PAGE_SIZE

	.section .rodata, "a", %progbits
	.globl	aarch32_sigcode
	.align 2
aarch32_sigcode:
	.word 0xe1a0000d	// mov r0, sp
	.word 0xe2800040	// add r0, r0, #SIGF_UC
	.word 0xe59f700c	// ldr r7, [pc, #12]
	.word 0xef000000	// swi #0
	.word 0xe59f7008	// ldr r7, [pc, #8]
	.word 0xef000000	// swi #0
	.word 0xeafffffa	// b . - 16
	.word SYS_sigreturn
	.word SYS_exit
	.align	3
	.size aarch32_sigcode, . - aarch32_sigcode
aarch32_esigcode:
	.data
	.global sz_aarch32_sigcode
sz_aarch32_sigcode:
	.quad aarch32_esigcode - aarch32_sigcode

GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)