/*-
 * Copyright (c) 2012-2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "assym.inc"
#include "opt_kstack_pages.h"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/hypervisor.h>
#include <machine/param.h>
#include <machine/pte.h>
#include <machine/vm.h>
#include <machine/vmparam.h>

/* Width of the kernel virtual address space mapped by TTBR1 */
#define	VIRT_BITS	48

#if PAGE_SIZE == PAGE_SIZE_16K
/*
 * The number of level 3 tables to create. 32 will allow for 1G of address
 * space, the same as a single level 2 page with 4k pages.
 */
#define	L3_PAGE_COUNT	32
#endif

/*
 * The size of our bootstrap stack.
 */
#define	BOOT_STACK_SIZE	(KSTACK_PAGES * PAGE_SIZE)

	.globl	kernbase
	.set	kernbase, KERNBASE

/*
 * Kernel entry point (BSP).
 *
 * We assume:
 *  MMU      on with an identity map, or off
 *  D-Cache: off
 *  I-Cache: on or off
 *  We are loaded at a 2MiB aligned address
 *
 * On entry x0 holds the boot argument (modulep or FDT pointer); it is
 * preserved until it is stored into the bootparams passed to initarm.
 */

ENTRY(_start)
	/* Enter the kernel exception level (drops EL2 -> EL1 if needed) */
	bl	enter_kernel_el

	/*
	 * Disable the MMU. We may have entered the kernel with it on and
	 * will need to update the tables later. If this has been set up
	 * with anything other than a VA == PA map then this will fail,
	 * but in this case the code to find where we are running from
	 * would have also failed.
	 */
	dsb	sy
	mrs	x2, sctlr_el1
	bic	x2, x2, SCTLR_M
	msr	sctlr_el1, x2
	isb

	/* Set the context id */
	msr	contextidr_el1, xzr

	/* Get the virt -> phys offset (returned in x28) */
	bl	get_load_phys_addr

	/*
	 * At this point:
	 * x28 = Our physical load address
	 */

	/* Create the page tables */
	bl	create_pagetables

	/*
	 * At this point:
	 * x27 = TTBR0 table
	 * x26 = Kernel L1 table
	 * x24 = TTBR1 table
	 */

	/* Enable the mmu (consumes x24/x27 as ttbr1/ttbr0) */
	bl	start_mmu

	/* Load the new ttbr0 pagetable (the final, non-bootstrap L0) */
	adrp	x27, pagetable_l0_ttbr0
	add	x27, x27, :lo12:pagetable_l0_ttbr0

	/*
	 * Jump to the virtual address space. The absolute address is
	 * loaded from the literal pool below so we branch to the KVA,
	 * not the identity-mapped PA we are currently running from.
	 */
	ldr	x15, .Lvirtdone
	br	x15

virtdone:
	BTI_J

	/* Set up the stack; reserve room for thread0's PCB at the top */
	adrp	x25, initstack_end
	add	x25, x25, :lo12:initstack_end
	sub	sp, x25, #PCB_SIZE

	/* Zero the BSS */
	ldr	x15, .Lbss
	ldr	x14, .Lend
1:
	str	xzr, [x15], #8
	cmp	x15, x14
	b.lo	1b

#if defined(PERTHREAD_SSP)
	/* Set sp_el0 to the boot canary for early per-thread SSP to work */
	adrp	x15, boot_canary
	add	x15, x15, :lo12:boot_canary
	msr	sp_el0, x15
#endif

	/* Backup the module pointer */
	mov	x1, x0

	/* Carve the bootparams structure out of the stack */
	sub	sp, sp, #BOOTPARAMS_SIZE
	mov	x0, sp

	/*
	 * Fill in bootparams: module pointer, kernel stack, TTBR0 table,
	 * the EL we booted at (x23, set by enter_kernel_el) and the
	 * HCR_EL2 value (x4, also stashed by enter_kernel_el).
	 */
	str	x1, [x0, #BP_MODULEP]
	adrp	x25, initstack
	add	x25, x25, :lo12:initstack
	str	x25, [x0, #BP_KERN_STACK]
	str	x27, [x0, #BP_KERN_TTBR0]
	str	x23, [x0, #BP_BOOT_EL]
	str	x4, [x0, #BP_HCR_EL2]

#ifdef KASAN
	/* Save bootparams */
	mov	x19, x0

	/* Bootstrap an early shadow map for the boot stack. */
	ldr	x0, [x0, #BP_KERN_STACK]
	ldr	x1, =BOOT_STACK_SIZE
	bl	kasan_init_early

	/* Restore bootparams */
	mov	x0, x19
#endif

	/* trace back starts here */
	mov	fp, #0
	/* Branch to C code */
	bl	initarm
	/* We are done with the boot params */
	add	sp, sp, #BOOTPARAMS_SIZE

	/*
	 * Enable pointer authentication in the kernel. We set the keys for
	 * thread0 in initarm so have to wait until it returns to enable it.
	 * If we were to enable it in initarm then any authentication when
	 * returning would fail as it was called with pointer authentication
	 * disabled.
	 */
	bl	ptrauth_start

	bl	mi_startup

	/* We should not get here */
	brk	0

	.align 3
.Lvirtdone:
	.quad	virtdone
.Lbss:
	.quad	__bss_start
.Lend:
	.quad	__bss_end
END(_start)

#ifdef SMP
/*
 * mpentry(unsigned long)
 *
 * Called by a core when it is being brought online.
 * The data in x0 is passed straight to init_secondary.
 */
ENTRY(mpentry)
	/* Disable interrupts */
	msr	daifset, #DAIF_INTR

	/* Enter the kernel exception level */
	bl	enter_kernel_el

	/* Set the context id */
	msr	contextidr_el1, xzr

	/*
	 * Load the kernel page table. The BSP has already built the
	 * tables, so APs just point at them.
	 */
	adrp	x24, pagetable_l0_ttbr1
	add	x24, x24, :lo12:pagetable_l0_ttbr1
	/* Load the identity page table */
	adrp	x27, pagetable_l0_ttbr0_bootstrap
	add	x27, x27, :lo12:pagetable_l0_ttbr0_bootstrap

	/* Enable the mmu */
	bl	start_mmu

	/* Load the new ttbr0 pagetable */
	adrp	x27, pagetable_l0_ttbr0
	add	x27, x27, :lo12:pagetable_l0_ttbr0

	/* Jump to the virtual address space */
	ldr	x15, =mp_virtdone
	br	x15

mp_virtdone:
	BTI_J

	/* Start using the AP boot stack (allocated by the BSP) */
	adrp	x4, bootstack
	ldr	x4, [x4, :lo12:bootstack]
	mov	sp, x4

#if defined(PERTHREAD_SSP)
	/* Set sp_el0 to the boot canary for early per-thread SSP to work */
	adrp	x15, boot_canary
	add	x15, x15, :lo12:boot_canary
	msr	sp_el0, x15
#endif

	/* Load the kernel ttbr0 pagetable */
	msr	ttbr0_el1, x27
	isb

	/* Invalidate the TLB */
	tlbi	vmalle1
	dsb	sy
	isb

	/*
	 * Initialize the per-CPU pointer before calling into C code, for the
	 * benefit of kernel sanitizers.
	 */
	adrp	x18, bootpcpu
	ldr	x18, [x18, :lo12:bootpcpu]
	msr	tpidr_el1, x18

	b	init_secondary
END(mpentry)
#endif

/*
 * If we are started in EL2, configure the required hypervisor
 * registers and drop to EL1.
 *
 * Returns:
 *  x23 = CurrentEL (masked) at entry, i.e. the EL we booted at
 *  x4  = HCR_EL2 value as read back after it was written (EL2 path only)
 * Clobbers x2, x3, x5.
 */
LENTRY(enter_kernel_el)
	mrs	x23, CurrentEL
	and	x23, x23, #(CURRENTEL_EL_MASK)
	cmp	x23, #(CURRENTEL_EL_EL2)
	b.eq	1f
	ret
1:
	/*
	 * Disable the MMU. If the HCR_EL2.E2H field is set we will clear it
	 * which may break address translation.
	 */
	dsb	sy
	mrs	x2, sctlr_el2
	bic	x2, x2, SCTLR_M
	msr	sctlr_el2, x2
	isb

	/* Configure the Hypervisor: 64-bit EL1, enable PAuth key access */
	ldr	x2, =(HCR_RW | HCR_APK | HCR_API)
	msr	hcr_el2, x2

	/*
	 * Stash value of HCR_EL2 for later. Read back rather than reusing
	 * x2 because some bits (e.g. E2H on Apple M1) may not have taken
	 * the written value.
	 */
	isb
	mrs	x4, hcr_el2

	/* Load the Virtualization Process ID Register */
	mrs	x2, midr_el1
	msr	vpidr_el2, x2

	/* Load the Virtualization Multiprocess ID Register */
	mrs	x2, mpidr_el1
	msr	vmpidr_el2, x2

	/* Set the bits that need to be 1 in sctlr_el1 */
	ldr	x2, .Lsctlr_res1
	msr	sctlr_el1, x2

	/*
	 * On some hardware, e.g., Apple M1, we can't clear E2H, so make sure we
	 * don't trap to EL2 for SIMD register usage to have at least a
	 * minimally usable system.
	 */
	tst	x4, #HCR_E2H
	mov	x3, #CPTR_RES1	/* HCR_E2H == 0 */
	mov	x5, #CPTR_FPEN	/* HCR_E2H == 1 */
	csel	x2, x3, x5, eq
	msr	cptr_el2, x2

	/* Don't trap to EL2 for CP15 traps */
	msr	hstr_el2, xzr

	/* Enable access to the physical timers at EL1 */
	mrs	x2, cnthctl_el2
	orr	x2, x2, #(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
	msr	cnthctl_el2, x2

	/* Set the counter offset to a known value */
	msr	cntvoff_el2, xzr

	/* Hypervisor trap functions */
	adrp	x2, hyp_stub_vectors
	add	x2, x2, :lo12:hyp_stub_vectors
	msr	vbar_el2, x2

	/* Zero vttbr_el2 so a hypervisor can tell the host and guest apart */
	msr	vttbr_el2, xzr

	/* Return to EL1h with interrupts masked */
	mov	x2, #(PSR_DAIF | PSR_M_EL1h)
	msr	spsr_el2, x2

	/* Configure GICv3 CPU interface */
	mrs	x2, id_aa64pfr0_el1
	/* Extract GIC bits from the register */
	ubfx	x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS
	/* GIC[3:0] == 0001 - GIC CPU interface via special regs. supported */
	cmp	x2, #(ID_AA64PFR0_GIC_CPUIF_EN >> ID_AA64PFR0_GIC_SHIFT)
	b.ne	2f

	mrs	x2, icc_sre_el2
	orr	x2, x2, #ICC_SRE_EL2_EN	/* Enable access from insecure EL1 */
	orr	x2, x2, #ICC_SRE_EL2_SRE	/* Enable system registers */
	msr	icc_sre_el2, x2
2:

	/* Set the address to return to our return address */
	msr	elr_el2, x30
	isb

	eret

	.align 3
.Lsctlr_res1:
	.quad SCTLR_RES1
LEND(enter_kernel_el)

/*
 * Get the physical address the kernel was loaded at.
 *
 * Returns x28 = physical address of KERNBASE (the load address).
 * Clobbers x29.
 */
LENTRY(get_load_phys_addr)
	/* Load the offset of get_load_phys_addr from KERNBASE */
	ldr	x28, =(get_load_phys_addr - KERNBASE)
	/* Load the physical address of get_load_phys_addr */
	adr	x29, get_load_phys_addr
	/* Find the physical address of KERNBASE, i.e. our load address */
	sub	x28, x29, x28
	ret
LEND(get_load_phys_addr)

/*
 * This builds the page tables containing the identity map, and the kernel
 * virtual map.
 *
 * It relies on:
 *  We were loaded to an address that is on a 2MiB boundary
 *  All the memory must not cross a 1GiB boundary
 *  x28 contains the physical address we were loaded from
 *
 * Returns:
 *  x27 = TTBR0 L0 table (identity map)
 *  x26 = Kernel L1 table
 *  x24 = TTBR1 L0 table (kernel map)
 *
 * TODO: This is out of date.
 * There are at least 5 pages before that address for the page tables
 *   The pages used are:
 *  - The Kernel L2 table
 *  - The Kernel L1 table
 *  - The Kernel L0 table (TTBR1)
 *  - The identity (PA = VA) L1 table
 *  - The identity (PA = VA) L0 table (TTBR0)
 */
LENTRY(create_pagetables)
	/* Save the Link register */
	mov	x5, x30

	/* Clean (zero) the whole page table region */
	adrp	x6, pagetable
	add	x6, x6, :lo12:pagetable
	mov	x26, x6
	adrp	x27, pagetable_end
	add	x27, x27, :lo12:pagetable_end
1:
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	cmp	x6, x27
	b.lo	1b

	/*
	 * Build the TTBR1 maps.
 */

	/* Find the size of the kernel */
	mov	x6, #(KERNBASE)

#if defined(LINUX_BOOT_ABI)
	/* X19 is used as 'map FDT data' flag */
	mov	x19, xzr

	/* No modules or FDT pointer ? */
	cbz	x0, booti_no_fdt

	/*
	 * Test if x0 points to modules descriptor(virtual address) or
	 * to FDT (physical address)
	 */
	cmp	x0, x6	/* x6 is #(KERNBASE) */
	b.lo	booti_fdt
#endif

	/* Booted with modules pointer */
	/* Find modulep - begin */
	sub	x8, x0, x6
	/*
	 * Add space for the module data. When PAGE_SIZE is 4k this will
	 * add at least 2 level 2 blocks (2 * 2MiB). When PAGE_SIZE is
	 * larger it will be at least as large as we use smaller level 3
	 * pages.
	 */
	ldr	x7, =((6 * 1024 * 1024) - 1)
	add	x8, x8, x7
	b	common

#if defined(LINUX_BOOT_ABI)
booti_fdt:
	/* Booted by U-Boot booti with FDT data */
	/* Set 'map FDT data' flag */
	mov	x19, #1

booti_no_fdt:
	/* Booted by U-Boot booti without FDT data */
	/* Find the end - begin */
	ldr	x7, .Lend
	sub	x8, x7, x6

	/*
	 * Add one 2MiB page for copy of FDT data (maximum FDT size),
	 * one for metadata and round up
	 */
	ldr	x7, =(3 * L2_SIZE - 1)
	add	x8, x8, x7
#endif

common:
	/* Here x8 = kernel size (bytes to map), rounded up as above */
#if PAGE_SIZE != PAGE_SIZE_4K
	/*
	 * Create L3 pages. The kernel will be loaded at a 2M aligned
	 * address, however when the page size is larger than 4k, L2 blocks
	 * are too large to map the kernel with such an alignment, so use
	 * level 3 pages instead.
	 */

	/* Get the number of l3 pages to allocate, rounded down */
	lsr	x10, x8, #(L3_SHIFT)

	/* Create the kernel space L2 table */
	mov	x6, x26
	mov	x7, #(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	mov	x8, #(KERNBASE)
	mov	x9, x28
	bl	build_l3_page_pagetable

	/* Move to the l2 table */
	ldr	x9, =(PAGE_SIZE * L3_PAGE_COUNT)
	add	x26, x26, x9

	/* Link the l2 -> l3 table */
	mov	x9, x6
	mov	x6, x26
	bl	link_l2_pagetable
#else
	/* Get the number of l2 pages to allocate, rounded down */
	lsr	x10, x8, #(L2_SHIFT)

	/* Create the kernel space L2 table */
	mov	x6, x26
	mov	x7, #(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	mov	x8, #(KERNBASE)
	mov	x9, x28
	bl	build_l2_block_pagetable
#endif

	/* Move to the l1 table */
	add	x26, x26, #PAGE_SIZE

	/* Link the l1 -> l2 table */
	mov	x9, x6
	mov	x6, x26
	bl	link_l1_pagetable

	/* Move to the l0 table */
	add	x24, x26, #PAGE_SIZE

	/* Link the l0 -> l1 table */
	mov	x9, x6
	mov	x6, x24
	mov	x10, #1
	bl	link_l0_pagetable

	/*
	 * Build the TTBR0 maps. As TTBR0 maps, they must specify ATTR_S1_nG.
	 * They are only needed early on, so the VA = PA map is uncached.
	 */
	add	x27, x24, #PAGE_SIZE

	mov	x6, x27		/* The initial page table */

	/* Create the VA = PA map */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	adrp	x16, _start
	and	x16, x16, #(~L2_OFFSET)
	mov	x9, x16		/* PA start */
	mov	x8, x16		/* VA start (== PA start) */
	mov	x10, #1
	bl	build_l2_block_pagetable

#if defined(SOCDEV_PA)
	/* Create a table for the UART */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_DEVICE))
	ldr	x9, =(L2_SIZE)
	add	x16, x16, x9	/* VA start */
	mov	x8, x16

	/* Store the socdev virtual address */
	add	x17, x8, #(SOCDEV_PA & L2_OFFSET)
	adrp	x9, socdev_va
	str	x17, [x9, :lo12:socdev_va]

	mov	x9, #(SOCDEV_PA & ~L2_OFFSET)	/* PA start */
	mov	x10, #1
	bl	build_l2_block_pagetable
#endif

#if defined(LINUX_BOOT_ABI)
	/* Map FDT data ? */
	cbz	x19, 1f

	/* Create the mapping for FDT data (2 MiB max) */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	ldr	x9, =(L2_SIZE)
	add	x16, x16, x9	/* VA start */
	mov	x8, x16
	mov	x9, x0		/* PA start */
	/* Update the module pointer to point at the allocated memory */
	and	x0, x0, #(L2_OFFSET)	/* Keep the lower bits */
	add	x0, x0, x8		/* Add the aligned virtual address */

	mov	x10, #1
	bl	build_l2_block_pagetable

1:
#endif

	/* Move to the l1 table */
	add	x27, x27, #PAGE_SIZE

	/* Link the l1 -> l2 table */
	mov	x9, x6
	mov	x6, x27
	bl	link_l1_pagetable

	/* Move to the l0 table */
	add	x27, x27, #PAGE_SIZE

	/* Link the l0 -> l1 table */
	mov	x9, x6
	mov	x6, x27
	mov	x10, #1
	bl	link_l0_pagetable

	/* Restore the Link register */
	mov	x30, x5
	ret
LEND(create_pagetables)

/*
 * Builds an L0 -> L1 table descriptor
 *
 * x6  = L0 table
 * x8  = Virtual Address
 * x9  = L1 PA (trashed)
 * x10 = Entry count (trashed)
 * x11, x12 and x13 are
 *	trashed
 */
LENTRY(link_l0_pagetable)
	/*
	 * Link an L0 -> L1 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L0_SHIFT
	and	x11, x11, #L0_ADDR_MASK

	/* Build the L0 table entry; tables are kernel-only, no EL0 access */
	mov	x12, #L0_TABLE
	orr	x12, x12, #(TATTR_UXN_TABLE | TATTR_AP_TABLE_NO_EL0)

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
1:	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	/* Next entry: consecutive indices map consecutive L1 table pages */
	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(link_l0_pagetable)

/*
 * Builds an L1 -> L2 table descriptor
 *
 * x6  = L1 table
 * x8  = Virtual Address
 * x9  = L2 PA (trashed)
 * x11, x12 and x13 are trashed
 */
LENTRY(link_l1_pagetable)
	/*
	 * Link an L1 -> L2 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L1_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L1 table entry */
	mov	x12, #L1_TABLE

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	ret
LEND(link_l1_pagetable)

/*
 * Builds count 2 MiB page table entries
 * x6  = L2 table
 * x7  = Block attributes
 * x8  = VA start
 * x9  = PA start (trashed)
 * x10 = Entry count (trashed)
 * x11, x12 and x13 are trashed
 */
LENTRY(build_l2_block_pagetable)
	/*
	 * Build the L2 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L2_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L2 block entry */
	orr	x12, x7, #L2_BLOCK
	orr	x12, x12, #(ATTR_DEFAULT)
	orr	x12, x12, #(ATTR_S1_UXN)
#ifdef __ARM_FEATURE_BTI_DEFAULT
	/* Mark the block as a guarded page for BTI */
	orr	x12, x12, #(ATTR_S1_GP)
#endif

	/* Only use the output address bits */
	lsr	x9, x9, #L2_SHIFT

	/* Set the physical address for this virtual address */
1:	orr	x13, x12, x9, lsl #L2_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	/* Next entry: step VA index and PA by one 2 MiB block */
	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(build_l2_block_pagetable)

#if PAGE_SIZE != PAGE_SIZE_4K
/*
 * Builds an L2 -> L3 table descriptor
 *
 * x6  = L2 table
 * x8  = Virtual Address
 * x9  = L3 PA (trashed)
 * x11, x12 and x13 are trashed
 */
LENTRY(link_l2_pagetable)
	/*
	 * Link an L2 -> L3 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L2_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L2 table entry */
	mov	x12, #L2_TABLE

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	ret
LEND(link_l2_pagetable)

/*
 * Builds count level 3 page table entries
 * x6  = L3 table
 * x7  = Block attributes
 * x8  = VA start
 * x9  = PA start (trashed)
 * x10 = Entry count (trashed)
 * x11, x12 and x13 are trashed
 */
LENTRY(build_l3_page_pagetable)
	/*
	 * Build the L3 table entry.
 */
	/* Find the table index */
	lsr	x11, x8, #L3_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L3 page entry */
	orr	x12, x7, #L3_PAGE
	orr	x12, x12, #(ATTR_DEFAULT)
	orr	x12, x12, #(ATTR_S1_UXN)
#ifdef __ARM_FEATURE_BTI_DEFAULT
	/* Mark the page as a guarded page for BTI */
	orr	x12, x12, #(ATTR_S1_GP)
#endif

	/* Only use the output address bits */
	lsr	x9, x9, #L3_SHIFT

	/* Set the physical address for this virtual address */
1:	orr	x13, x12, x9, lsl #L3_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	/* Next entry: step VA index and PA by one page */
	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(build_l3_page_pagetable)
#endif

/*
 * Turn the MMU on.
 *
 * Inputs:
 *  x27 = TTBR0 table
 *  x24 = TTBR1 table
 * Clobbers x1, x2, x3.
 */
LENTRY(start_mmu)
	dsb	sy

	/* Load the exception vectors */
	ldr	x2, =exception_vectors
	msr	vbar_el1, x2

	/* Load ttbr0 and ttbr1 */
	msr	ttbr0_el1, x27
	msr	ttbr1_el1, x24
	isb

	/* Clear the Monitor Debug System control register */
	msr	mdscr_el1, xzr

	/* Invalidate the TLB */
	tlbi	vmalle1is
	dsb	ish
	isb

	ldr	x2, mair
	msr	mair_el1, x2

	/*
	 * Setup TCR according to the PARange and ASIDBits fields
	 * from ID_AA64MMFR0_EL1 and the HAFDBS field from the
	 * ID_AA64MMFR1_EL1. More precisely, set TCR_EL1.AS
	 * to 1 only if the ASIDBits field equals 0b0010.
	 */
	ldr	x2, tcr
	mrs	x3, id_aa64mmfr0_el1

	/* Copy the bottom 3 bits from id_aa64mmfr0_el1 into TCR.IPS */
	bfi	x2, x3, #(TCR_IPS_SHIFT), #(TCR_IPS_WIDTH)
	and	x3, x3, #(ID_AA64MMFR0_ASIDBits_MASK)

	/* Check if the HW supports 16 bit ASIDS */
	cmp	x3, #(ID_AA64MMFR0_ASIDBits_16)
	/* If so x3 == 1, else x3 == 0 */
	cset	x3, eq
	/* Set TCR.AS with x3 */
	bfi	x2, x3, #(TCR_ASID_SHIFT), #(TCR_ASID_WIDTH)

	/*
	 * Check if the HW supports access flag and dirty state updates,
	 * and set TCR_EL1.HA and TCR_EL1.HD accordingly.
	 */
	mrs	x3, id_aa64mmfr1_el1
	and	x3, x3, #(ID_AA64MMFR1_HAFDBS_MASK)
	cmp	x3, #1
	b.ne	1f
	orr	x2, x2, #(TCR_HA)
	b	2f
1:
	cmp	x3, #2
	b.ne	2f
	orr	x2, x2, #(TCR_HA | TCR_HD)
2:
	msr	tcr_el1, x2

	/*
	 * Setup SCTLR. This enables the MMU (SCTLR_M is in sctlr_set).
	 */
	ldr	x2, sctlr_set
	ldr	x3, sctlr_clear
	mrs	x1, sctlr_el1
	bic	x1, x1, x3	/* Clear the required bits */
	orr	x1, x1, x2	/* Set the required bits */
	msr	sctlr_el1, x1
	isb

	ret

	.align 3
mair:
	.quad	MAIR_ATTR(MAIR_DEVICE_nGnRnE, VM_MEMATTR_DEVICE_nGnRnE) | \
		MAIR_ATTR(MAIR_NORMAL_NC, VM_MEMATTR_UNCACHEABLE) |	\
		MAIR_ATTR(MAIR_NORMAL_WB, VM_MEMATTR_WRITE_BACK) |	\
		MAIR_ATTR(MAIR_NORMAL_WT, VM_MEMATTR_WRITE_THROUGH) |	\
		MAIR_ATTR(MAIR_DEVICE_nGnRE, VM_MEMATTR_DEVICE_nGnRE)
tcr:
#if PAGE_SIZE == PAGE_SIZE_4K
#define	TCR_TG	(TCR_TG1_4K | TCR_TG0_4K)
#elif PAGE_SIZE == PAGE_SIZE_16K
#define	TCR_TG	(TCR_TG1_16K | TCR_TG0_16K)
#else
#error Unsupported page size
#endif

	.quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG | \
	    TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
sctlr_set:
	/* Bits to set */
	.quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN | \
	    SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \
	    SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C | \
	    SCTLR_M | SCTLR_CP15BEN | SCTLR_BT1 | SCTLR_BT0)
sctlr_clear:
	/* Bits to clear */
	.quad (SCTLR_EE | SCTLR_E0E | SCTLR_IESB | SCTLR_WXN | SCTLR_UMA | \
	    SCTLR_ITD | SCTLR_A)
LEND(start_mmu)

ENTRY(abort)
	/* Spin forever; a fatal-error landing pad */
	b abort
END(abort)

.bss
	.align	PAGE_SHIFT
initstack:
	.space	BOOT_STACK_SIZE
initstack_end:

	.section .init_pagetable, "aw", %nobits
	.align PAGE_SHIFT
	/*
	 * 6 initial tables (in the following order):
	 *  L2 for kernel (High addresses)
	 *  L1 for kernel
	 *  L0 for kernel
	 *  L1 bootstrap for user (Low addresses)
	 *  L0 bootstrap for user
	 *  L0 for user
	 */
	.globl pagetable_l0_ttbr1
pagetable:
#if PAGE_SIZE != PAGE_SIZE_4K
	/* Extra L3 tables come first when using >4k pages */
	.space	(PAGE_SIZE * L3_PAGE_COUNT)
pagetable_l2_ttbr1:
#endif
	.space	PAGE_SIZE
pagetable_l1_ttbr1:
	.space	PAGE_SIZE
pagetable_l0_ttbr1:
	.space	PAGE_SIZE
pagetable_l2_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l1_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l0_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l0_ttbr0:
	.space	PAGE_SIZE
pagetable_end:

el2_pagetable:
	.space	PAGE_SIZE

	.section .rodata, "a", %progbits
	.globl	aarch32_sigcode
	.align 2
	/* AArch32 signal-return trampoline copied out for compat processes */
aarch32_sigcode:
	.word 0xe1a0000d	// mov r0, sp
	.word 0xe2800040	// add r0, r0, #SIGF_UC
	.word 0xe59f700c	// ldr r7, [pc, #12]
	.word 0xef000000	// swi #0
	.word 0xe59f7008	// ldr r7, [pc, #8]
	.word 0xef000000	// swi #0
	.word 0xeafffffa	// b . - 16
	.word SYS_sigreturn
	.word SYS_exit
	.align	3
	.size aarch32_sigcode, . - aarch32_sigcode
aarch32_esigcode:
	.data
	.global sz_aarch32_sigcode
sz_aarch32_sigcode:
	.quad aarch32_esigcode - aarch32_sigcode