/*-
 * Copyright (c) 2012-2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "assym.inc"
#include "opt_kstack_pages.h"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/hypervisor.h>
#include <machine/param.h>
#include <machine/pte.h>
#include <machine/vm.h>
#include <machine/vmparam.h>

#define	VIRT_BITS	48

#if PAGE_SIZE == PAGE_SIZE_16K
/*
 * The number of level 3 tables to create. 32 will allow for 1G of address
 * space, the same as a single level 2 table maps with 4k pages.
 */
#define	L3_PAGE_COUNT	32
#endif
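/*
 * A worked check of the figure above, assuming 16k granules: each L3
 * table holds PAGE_SIZE / 8 = 2048 entries and each entry maps one
 * 16KiB page, so one table covers 2048 * 16KiB = 32MiB. 32 tables
 * therefore cover 1GiB, matching the reach of a single level 2 table
 * with 4k pages (512 entries * 2MiB blocks).
 */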
	.globl kernbase
	.set kernbase, KERNBASE

/*
 * We assume:
 *  MMU      on with an identity map, or off
 *  D-Cache: off
 *  I-Cache: on or off
 *  We are loaded at a 2MiB aligned address
 */

ENTRY(_start)
	/* Drop to EL1 */
	bl	drop_to_el1

	/*
	 * Disable the MMU. We may have entered the kernel with it on and
	 * will need to update the tables later. If this has been set up
	 * with anything other than a VA == PA map then this will fail,
	 * but in that case the code to find where we are running from
	 * would also have failed.
	 */
	dsb	sy
	mrs	x2, sctlr_el1
	bic	x2, x2, SCTLR_M
	msr	sctlr_el1, x2
	isb

	/* Set the context id */
	msr	contextidr_el1, xzr

	/* Get the virt -> phys offset */
	bl	get_virt_delta

	/*
	 * At this point:
	 * x29 = PA - VA
	 * x28 = Our physical load address
	 */

	/* Create the page tables */
	bl	create_pagetables

	/*
	 * At this point:
	 * x27 = TTBR0 table
	 * x26 = Kernel L1 table
	 * x24 = TTBR1 table
	 */

	/* Enable the mmu */
	bl	start_mmu

	/* Load the new ttbr0 pagetable */
	adrp	x27, pagetable_l0_ttbr0
	add	x27, x27, :lo12:pagetable_l0_ttbr0

	/* Jump to the virtual address space */
	ldr	x15, .Lvirtdone
	br	x15

virtdone:
	BTI_J

	/* Set up the stack */
	adrp	x25, initstack_end
	add	x25, x25, :lo12:initstack_end
	sub	sp, x25, #PCB_SIZE

	/* Zero the BSS */
	ldr	x15, .Lbss
	ldr	x14, .Lend
1:
	str	xzr, [x15], #8
	cmp	x15, x14
	b.lo	1b

#if defined(PERTHREAD_SSP)
	/* Set sp_el0 to the boot canary for early per-thread SSP to work */
	adrp	x15, boot_canary
	add	x15, x15, :lo12:boot_canary
	msr	sp_el0, x15
#endif

	/* Back up the module pointer */
	mov	x1, x0

	sub	sp, sp, #BOOTPARAMS_SIZE
	mov	x0, sp

	/* Negate the delta so it is VA -> PA */
	neg	x29, x29

	str	x1, [x0, #BP_MODULEP]
	str	x29, [x0, #BP_KERN_DELTA]
	adrp	x25, initstack
	add	x25, x25, :lo12:initstack
	str	x25, [x0, #BP_KERN_STACK]
	str	x27, [x0, #BP_KERN_TTBR0]
	str	x23, [x0, #BP_BOOT_EL]
	str	x4, [x0, #BP_HCR_EL2]

#ifdef KASAN
	/* Save bootparams */
	mov	x19, x0

	/* Bootstrap an early shadow map for the boot stack. */
	bl	pmap_san_bootstrap

	/* Restore bootparams */
	mov	x0, x19
#endif

	/* The traceback starts here */
	mov	fp, #0
	/* Branch to C code */
	bl	initarm
	/* We are done with the boot params */
	add	sp, sp, #BOOTPARAMS_SIZE

	/*
	 * Enable pointer authentication in the kernel. We set the keys for
	 * thread0 in initarm so have to wait until it returns to enable it.
	 * If we were to enable it in initarm then any authentication when
	 * returning would fail as it was called with pointer authentication
	 * disabled.
	 */
	bl	ptrauth_start

	bl	mi_startup

	/* We should not get here */
	brk	0

	.align 3
.Lvirtdone:
	.quad	virtdone
.Lbss:
	.quad	__bss_start
.Lend:
	.quad	__bss_end
END(_start)
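/*
 * The BP_* offsets stored in _start above are generated from the C
 * boot-parameter structure handed to initarm(). As a sketch, with
 * illustrative field types and ordering (the authoritative definition
 * is struct arm64_bootparams in <machine/machdep.h>):
 *
 *	struct arm64_bootparams {
 *		vm_offset_t	modulep;	// BP_MODULEP
 *		uint64_t	kern_delta;	// BP_KERN_DELTA (VA -> PA)
 *		vm_offset_t	kern_stack;	// BP_KERN_STACK
 *		uint64_t	kern_ttbr0;	// BP_KERN_TTBR0
 *		uint64_t	boot_el;	// BP_BOOT_EL (1 or 2)
 *		uint64_t	hcr_el2;	// BP_HCR_EL2
 *	};
 */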
#ifdef SMP
/*
 * mpentry(unsigned long)
 *
 * Called by a core when it is being brought online.
 * The data in x0 is passed straight to init_secondary.
 */
ENTRY(mpentry)
	/* Disable interrupts */
	msr	daifset, #DAIF_INTR

	/* Drop to EL1 */
	bl	drop_to_el1

	/* Set the context id */
	msr	contextidr_el1, xzr

	/* Load the kernel page table */
	adrp	x24, pagetable_l0_ttbr1
	add	x24, x24, :lo12:pagetable_l0_ttbr1
	/* Load the identity page table */
	adrp	x27, pagetable_l0_ttbr0_bootstrap
	add	x27, x27, :lo12:pagetable_l0_ttbr0_bootstrap

	/* Enable the mmu */
	bl	start_mmu

	/* Load the new ttbr0 pagetable */
	adrp	x27, pagetable_l0_ttbr0
	add	x27, x27, :lo12:pagetable_l0_ttbr0

	/* Jump to the virtual address space */
	ldr	x15, =mp_virtdone
	br	x15

mp_virtdone:
	BTI_J

	/* Start using the AP boot stack */
	ldr	x4, =bootstack
	ldr	x4, [x4]
	mov	sp, x4

#if defined(PERTHREAD_SSP)
	/* Set sp_el0 to the boot canary for early per-thread SSP to work */
	adrp	x15, boot_canary
	add	x15, x15, :lo12:boot_canary
	msr	sp_el0, x15
#endif

	/* Load the kernel ttbr0 pagetable */
	msr	ttbr0_el1, x27
	isb

	/* Invalidate the TLB */
	tlbi	vmalle1
	dsb	sy
	isb

	b	init_secondary
END(mpentry)
#endif

/*
 * If we are started in EL2, configure the required hypervisor
 * registers and drop to EL1.
 */
LENTRY(drop_to_el1)
	mrs	x23, CurrentEL
	lsr	x23, x23, #2
	cmp	x23, #0x2
	b.eq	1f
	ret
1:
	/*
	 * Disable the MMU. If the HCR_EL2.E2H field is set we will clear it
	 * which may break address translation.
	 */
	dsb	sy
	mrs	x2, sctlr_el2
	bic	x2, x2, SCTLR_M
	msr	sctlr_el2, x2
	isb

	/* Configure the Hypervisor */
	ldr	x2, =(HCR_RW | HCR_APK | HCR_API)
	msr	hcr_el2, x2

	/* Stash value of HCR_EL2 for later */
	isb
	mrs	x4, hcr_el2

	/* Load the Virtualization Process ID Register */
	mrs	x2, midr_el1
	msr	vpidr_el2, x2

	/* Load the Virtualization Multiprocess ID Register */
	mrs	x2, mpidr_el1
	msr	vmpidr_el2, x2

	/* Set the bits that need to be 1 in sctlr_el1 */
	ldr	x2, .Lsctlr_res1
	msr	sctlr_el1, x2

	/*
	 * On some hardware, e.g., Apple M1, we can't clear E2H, so make sure we
	 * don't trap to EL2 for SIMD register usage to have at least a
	 * minimally usable system.
	 */
	tst	x4, #HCR_E2H
	mov	x3, #CPTR_RES1	/* HCR_E2H == 0 */
	mov	x5, #CPTR_FPEN	/* HCR_E2H == 1 */
	csel	x2, x3, x5, eq
	msr	cptr_el2, x2

	/* Don't trap to EL2 for CP15 traps */
	msr	hstr_el2, xzr

	/* Enable access to the physical timers at EL1 */
	mrs	x2, cnthctl_el2
	orr	x2, x2, #(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
	msr	cnthctl_el2, x2

	/* Set the counter offset to a known value */
	msr	cntvoff_el2, xzr

	/* Hypervisor trap functions */
	adrp	x2, hyp_stub_vectors
	add	x2, x2, :lo12:hyp_stub_vectors
	msr	vbar_el2, x2

	/* Zero vttbr_el2 so a hypervisor can tell the host and guest apart */
	msr	vttbr_el2, xzr

	mov	x2, #(PSR_DAIF | PSR_M_EL1h)
	msr	spsr_el2, x2
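	/*
	 * A worked example of the SPSR value built above, using the usual
	 * encodings: PSR_M_EL1h (0x5) selects EL1 with SP_EL1 and PSR_DAIF
	 * sets the D, A, I and F mask bits (bits 9:6, 0x3c0), so x2 == 0x3c5.
	 * The later eret therefore lands at the address in elr_el2, in EL1h,
	 * with all exceptions masked.
	 */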
	/* Configure GICv3 CPU interface */
	mrs	x2, id_aa64pfr0_el1
	/* Extract GIC bits from the register */
	ubfx	x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS
	/* GIC[3:0] == 0001 - GIC CPU interface via special regs. supported */
	cmp	x2, #(ID_AA64PFR0_GIC_CPUIF_EN >> ID_AA64PFR0_GIC_SHIFT)
	b.ne	2f

	mrs	x2, icc_sre_el2
	orr	x2, x2, #ICC_SRE_EL2_EN	/* Enable access from non-secure EL1 */
	orr	x2, x2, #ICC_SRE_EL2_SRE	/* Enable system registers */
	msr	icc_sre_el2, x2
2:

	/* Set the address to return to our return address */
	msr	elr_el2, x30
	isb

	eret

	.align 3
.Lsctlr_res1:
	.quad SCTLR_RES1
LEND(drop_to_el1)

/*
 * Get the delta between the physical address we were loaded to and the
 * virtual address we expect to run from. This is used when building the
 * initial page tables.
 */
LENTRY(get_virt_delta)
	/* Load the physical address of virt_map */
	adrp	x29, virt_map
	add	x29, x29, :lo12:virt_map
	/* Load the virtual address of virt_map stored in virt_map */
	ldr	x28, [x29]
	/* Find PA - VA, so for any address PA' = VA' + (PA - VA) = VA' + x29 */
	sub	x29, x29, x28
	/* Find the load address for the kernel */
	mov	x28, #(KERNBASE)
	add	x28, x28, x29
	ret

	.align 3
virt_map:
	.quad	virt_map
LEND(get_virt_delta)

/*
 * This builds the page tables containing the identity map, and the kernel
 * virtual map.
 *
 * It relies on:
 *  We were loaded to an address that is on a 2MiB boundary
 *  None of the memory crosses a 1GiB boundary
 *  x28 contains the physical address we were loaded at
 *
 * TODO: This is out of date.
 * There are at least 5 pages before that address for the page tables
 *   The pages used are:
 *   - The Kernel L2 table
 *   - The Kernel L1 table
 *   - The Kernel L0 table (TTBR1)
 *   - The identity (PA = VA) L1 table
 *   - The identity (PA = VA) L0 table (TTBR0)
 */
LENTRY(create_pagetables)
	/* Save the Link register */
	mov	x5, x30

	/* Clean the page table */
	adrp	x6, pagetable
	add	x6, x6, :lo12:pagetable
	mov	x26, x6
	adrp	x27, pagetable_end
	add	x27, x27, :lo12:pagetable_end
1:
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	cmp	x6, x27
	b.lo	1b

	/*
	 * Build the TTBR1 maps.
	 */

	/* Find the size of the kernel */
	mov	x6, #(KERNBASE)

#if defined(LINUX_BOOT_ABI)
	/* X19 is used as 'map FDT data' flag */
	mov	x19, xzr

	/* No modules or FDT pointer ? */
	cbz	x0, booti_no_fdt

	/*
	 * Test if x0 points to modules descriptor (virtual address) or
	 * to FDT (physical address)
	 */
	cmp	x0, x6		/* x6 is #(KERNBASE) */
	b.lo	booti_fdt
#endif

	/* Booted with modules pointer */
	/* Find modulep - begin */
	sub	x8, x0, x6
	/*
	 * Add space for the module data. When PAGE_SIZE is 4k this will
	 * add at least 2 level 2 blocks (2 * 2MiB). When PAGE_SIZE is
	 * larger the reservation is at least as large, as we then map
	 * with smaller level 3 pages.
	 */
	ldr	x7, =((6 * 1024 * 1024) - 1)
	add	x8, x8, x7
	b	common
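/*
 * A worked example of the reservation above, assuming 4k pages and a
 * hypothetical modulep - KERNBASE of 3MiB: x8 becomes 3MiB + 6MiB - 1,
 * and the "lsr x10, x8, #(L2_SHIFT)" at "common" rounds that down to
 * four 2MiB L2 blocks (8MiB). The 6MiB - 1 pad thus guarantees at
 * least 4MiB of mapped space past modulep even after rounding down.
 */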
#if defined(LINUX_BOOT_ABI)
booti_fdt:
	/* Booted by U-Boot booti with FDT data */
	/* Set 'map FDT data' flag */
	mov	x19, #1

booti_no_fdt:
	/* Booted by U-Boot booti without FDT data */
	/* Find the end - begin */
	ldr	x7, .Lend
	sub	x8, x7, x6

	/*
	 * Add one 2MiB page for a copy of the FDT data (maximum FDT size),
	 * one for metadata, and round up
	 */
	ldr	x7, =(3 * L2_SIZE - 1)
	add	x8, x8, x7
#endif

common:
#if PAGE_SIZE != PAGE_SIZE_4K
	/*
	 * Create L3 pages. The kernel will be loaded at a 2M aligned
	 * address, but when the page size is larger than 4k, L2 blocks
	 * are too large to map the kernel with only that alignment, so
	 * use L3 pages instead.
	 */

	/* Get the number of l3 pages to allocate, rounded down */
	lsr	x10, x8, #(L3_SHIFT)

	/* Create the kernel space L3 table */
	mov	x6, x26
	mov	x7, #(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	mov	x8, #(KERNBASE)
	mov	x9, x28
	bl	build_l3_page_pagetable

	/* Move to the l2 table */
	ldr	x9, =(PAGE_SIZE * L3_PAGE_COUNT)
	add	x26, x26, x9

	/* Link the l2 -> l3 table */
	mov	x9, x6
	mov	x6, x26
	bl	link_l2_pagetable
#else
	/* Get the number of l2 pages to allocate, rounded down */
	lsr	x10, x8, #(L2_SHIFT)

	/* Create the kernel space L2 table */
	mov	x6, x26
	mov	x7, #(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	mov	x8, #(KERNBASE)
	mov	x9, x28
	bl	build_l2_block_pagetable
#endif

	/* Move to the l1 table */
	add	x26, x26, #PAGE_SIZE

	/* Link the l1 -> l2 table */
	mov	x9, x6
	mov	x6, x26
	bl	link_l1_pagetable

	/* Move to the l0 table */
	add	x24, x26, #PAGE_SIZE

	/* Link the l0 -> l1 table */
	mov	x9, x6
	mov	x6, x24
	mov	x10, #1
	bl	link_l0_pagetable

	/*
	 * Build the TTBR0 maps. As TTBR0 maps, they must specify ATTR_S1_nG.
	 * They are only needed early on; the VA = PA map is write-back
	 * cacheable.
	 */
	add	x27, x24, #PAGE_SIZE

	mov	x6, x27		/* The initial page table */

	/* Create the VA = PA map */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	adrp	x16, _start
	and	x16, x16, #(~L2_OFFSET)
	mov	x9, x16		/* PA start */
	mov	x8, x16		/* VA start (== PA start) */
	mov	x10, #1
	bl	build_l2_block_pagetable

#if defined(SOCDEV_PA)
	/* Create a table for the UART */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_DEVICE))
	ldr	x9, =(L2_SIZE)
	add	x16, x16, x9	/* VA start */
	mov	x8, x16

	/* Store the socdev virtual address */
	add	x17, x8, #(SOCDEV_PA & L2_OFFSET)
	adrp	x9, socdev_va
	str	x17, [x9, :lo12:socdev_va]

	mov	x9, #(SOCDEV_PA & ~L2_OFFSET)	/* PA start */
	mov	x10, #1
	bl	build_l2_block_pagetable
#endif
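/*
 * For illustration, with 4k pages and a hypothetical SOCDEV_PA of
 * 0x3f201000: the block above maps the 2MiB frame at PA 0x3f200000
 * (SOCDEV_PA & ~L2_OFFSET), and socdev_va gets the mapped VA plus the
 * offset within the block (SOCDEV_PA & L2_OFFSET == 0x1000), so early
 * UART accesses hit the right device registers.
 */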
#if defined(LINUX_BOOT_ABI)
	/* Map the FDT data? */
	cbz	x19, 1f

	/* Create the mapping for FDT data (2 MiB max) */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	ldr	x9, =(L2_SIZE)
	add	x16, x16, x9	/* VA start */
	mov	x8, x16
	mov	x9, x0		/* PA start */
	/* Update the module pointer to point at the allocated memory */
	and	x0, x0, #(L2_OFFSET)	/* Keep the lower bits */
	add	x0, x0, x8		/* Add the aligned virtual address */

	mov	x10, #1
	bl	build_l2_block_pagetable

1:
#endif

	/* Move to the l1 table */
	add	x27, x27, #PAGE_SIZE

	/* Link the l1 -> l2 table */
	mov	x9, x6
	mov	x6, x27
	bl	link_l1_pagetable

	/* Move to the l0 table */
	add	x27, x27, #PAGE_SIZE

	/* Link the l0 -> l1 table */
	mov	x9, x6
	mov	x6, x27
	mov	x10, #1
	bl	link_l0_pagetable

	/* Restore the Link register */
	mov	x30, x5
	ret
LEND(create_pagetables)

/*
 * Builds L0 -> L1 table descriptors
 *
 *  x6  = L0 table
 *  x8  = Virtual Address
 *  x9  = L1 PA (trashed)
 *  x10 = Entry count (trashed)
 *  x11, x12 and x13 are trashed
 */
LENTRY(link_l0_pagetable)
	/*
	 * Link an L0 -> L1 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L0_SHIFT
	and	x11, x11, #L0_ADDR_MASK

	/* Build the L0 table entry */
	mov	x12, #L0_TABLE
	orr	x12, x12, #(TATTR_UXN_TABLE | TATTR_AP_TABLE_NO_EL0)

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
1:	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(link_l0_pagetable)

/*
 * Builds an L1 -> L2 table descriptor
 *
 *  x6  = L1 table
 *  x8  = Virtual Address
 *  x9  = L2 PA (trashed)
 *  x11, x12 and x13 are trashed
 */
LENTRY(link_l1_pagetable)
	/*
	 * Link an L1 -> L2 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L1_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L1 table entry */
	mov	x12, #L1_TABLE

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	ret
LEND(link_l1_pagetable)

/*
 * Builds "count" 2 MiB L2 block page table entries
 *
 *  x6  = L2 table
 *  x7  = Block attributes
 *  x8  = VA start
 *  x9  = PA start (trashed)
 *  x10 = Entry count (trashed)
 *  x11, x12 and x13 are trashed
 */
LENTRY(build_l2_block_pagetable)
	/*
	 * Build the L2 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L2_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L2 block entry */
	orr	x12, x7, #L2_BLOCK
	orr	x12, x12, #(ATTR_DEFAULT)
	orr	x12, x12, #(ATTR_S1_UXN)

	/* Only use the output address bits */
	lsr	x9, x9, #L2_SHIFT

	/* Set the physical address for this virtual address */
1:	orr	x13, x12, x9, lsl #L2_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(build_l2_block_pagetable)
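/*
 * A worked example of the index arithmetic above, assuming 4k pages:
 * for VA == KERNBASE == 0xffff000000000000, (VA >> L2_SHIFT) &
 * Ln_ADDR_MASK == (VA >> 21) & 0x1ff == 0, so the kernel's first 2MiB
 * block occupies slot 0 of its L2 table. Because x9 was shifted right
 * by L2_SHIFT, the "add x9, x9, #1" in the loop advances the output
 * address one 2MiB block at a time.
 */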
#if PAGE_SIZE != PAGE_SIZE_4K
/*
 * Builds an L2 -> L3 table descriptor
 *
 *  x6  = L2 table
 *  x8  = Virtual Address
 *  x9  = L3 PA (trashed)
 *  x11, x12 and x13 are trashed
 */
LENTRY(link_l2_pagetable)
	/*
	 * Link an L2 -> L3 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L2_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L2 table entry */
	mov	x12, #L2_TABLE

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	ret
LEND(link_l2_pagetable)

/*
 * Builds "count" level 3 page table entries
 *
 *  x6  = L3 table
 *  x7  = Block attributes
 *  x8  = VA start
 *  x9  = PA start (trashed)
 *  x10 = Entry count (trashed)
 *  x11, x12 and x13 are trashed
 */
LENTRY(build_l3_page_pagetable)
	/*
	 * Build the L3 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L3_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L3 page entry */
	orr	x12, x7, #L3_PAGE
	orr	x12, x12, #(ATTR_DEFAULT)
	orr	x12, x12, #(ATTR_S1_UXN)

	/* Only use the output address bits */
	lsr	x9, x9, #L3_SHIFT

	/* Set the physical address for this virtual address */
1:	orr	x13, x12, x9, lsl #L3_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(build_l3_page_pagetable)
#endif

LENTRY(start_mmu)
	dsb	sy

	/* Load the exception vectors */
	ldr	x2, =exception_vectors
	msr	vbar_el1, x2

	/* Load ttbr0 and ttbr1 */
	msr	ttbr0_el1, x27
	msr	ttbr1_el1, x24
	isb

	/* Clear the Monitor Debug System control register */
	msr	mdscr_el1, xzr

	/* Invalidate the TLB */
	tlbi	vmalle1is
	dsb	ish
	isb

	ldr	x2, mair
	msr	mair_el1, x2

	/*
	 * Set up TCR according to the PARange and ASIDBits fields
	 * from ID_AA64MMFR0_EL1 and the HAFDBS field from
	 * ID_AA64MMFR1_EL1. More precisely, set TCR_EL1.AS
	 * to 1 only if the ASIDBits field equals 0b0010.
	 */
	ldr	x2, tcr
	mrs	x3, id_aa64mmfr0_el1

	/* Copy the bottom 3 bits from id_aa64mmfr0_el1 into TCR.IPS */
	bfi	x2, x3, #(TCR_IPS_SHIFT), #(TCR_IPS_WIDTH)
	and	x3, x3, #(ID_AA64MMFR0_ASIDBits_MASK)

	/* Check if the HW supports 16 bit ASIDs */
	cmp	x3, #(ID_AA64MMFR0_ASIDBits_16)
	/* If so x3 == 1, else x3 == 0 */
	cset	x3, eq
	/* Set TCR.AS with x3 */
	bfi	x2, x3, #(TCR_ASID_SHIFT), #(TCR_ASID_WIDTH)

	/*
	 * Check if the HW supports access flag and dirty state updates,
	 * and set TCR_EL1.HA and TCR_EL1.HD accordingly.
	 */
	mrs	x3, id_aa64mmfr1_el1
	and	x3, x3, #(ID_AA64MMFR1_HAFDBS_MASK)
	cmp	x3, #1
	b.ne	1f
	orr	x2, x2, #(TCR_HA)
	b	2f
1:
	cmp	x3, #2
	b.ne	2f
	orr	x2, x2, #(TCR_HA | TCR_HD)
2:
	msr	tcr_el1, x2
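	/*
	 * A worked summary of the TCR value just written: TCR_TxSZ(64 -
	 * VIRT_BITS) gives T0SZ == T1SZ == 16, i.e. 48-bit regions in both
	 * halves of the address space; IPS carries the PARange field copied
	 * from ID_AA64MMFR0_EL1; AS is 1 only when the CPU implements 16-bit
	 * ASIDs (ASIDBits == 0b0010); and HA/HD are set only when hardware
	 * access flag and dirty state updates were detected above.
	 */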
	/*
	 * Set up SCTLR.
	 */
	ldr	x2, sctlr_set
	ldr	x3, sctlr_clear
	mrs	x1, sctlr_el1
	bic	x1, x1, x3	/* Clear the required bits */
	orr	x1, x1, x2	/* Set the required bits */
	msr	sctlr_el1, x1
	isb

	ret

	.align 3
mair:
	.quad	MAIR_ATTR(MAIR_DEVICE_nGnRnE, VM_MEMATTR_DEVICE_nGnRnE) |	\
		MAIR_ATTR(MAIR_NORMAL_NC, VM_MEMATTR_UNCACHEABLE) |		\
		MAIR_ATTR(MAIR_NORMAL_WB, VM_MEMATTR_WRITE_BACK) |		\
		MAIR_ATTR(MAIR_NORMAL_WT, VM_MEMATTR_WRITE_THROUGH) |		\
		MAIR_ATTR(MAIR_DEVICE_nGnRE, VM_MEMATTR_DEVICE_nGnRE)
tcr:
#if PAGE_SIZE == PAGE_SIZE_4K
#define	TCR_TG	(TCR_TG1_4K | TCR_TG0_4K)
#elif PAGE_SIZE == PAGE_SIZE_16K
#define	TCR_TG	(TCR_TG1_16K | TCR_TG0_16K)
#else
#error Unsupported page size
#endif

	.quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG |	\
	    TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
sctlr_set:
	/* Bits to set */
	.quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN |	\
	    SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE |		\
	    SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C |	\
	    SCTLR_M | SCTLR_CP15BEN | SCTLR_BT1 | SCTLR_BT0)
sctlr_clear:
	/* Bits to clear */
	.quad (SCTLR_EE | SCTLR_E0E | SCTLR_IESB | SCTLR_WXN | SCTLR_UMA |	\
	    SCTLR_ITD | SCTLR_A)
LEND(start_mmu)

ENTRY(abort)
	b abort
END(abort)

.bss
	.align	PAGE_SHIFT
initstack:
	.space	(PAGE_SIZE * KSTACK_PAGES)
initstack_end:

	.section .init_pagetable, "aw", %nobits
	.align PAGE_SHIFT
	/*
	 * 7 initial tables (in the following order), preceded by the L3
	 * pages when PAGE_SIZE != PAGE_SIZE_4K:
	 *  L2 for kernel (High addresses)
	 *  L1 for kernel
	 *  L0 for kernel
	 *  L2 bootstrap for user (Low addresses)
	 *  L1 bootstrap for user
	 *  L0 bootstrap for user
	 *  L0 for user
	 */
	.globl pagetable_l0_ttbr1
pagetable:
#if PAGE_SIZE != PAGE_SIZE_4K
	.space	(PAGE_SIZE * L3_PAGE_COUNT)
pagetable_l2_ttbr1:
#endif
	.space	PAGE_SIZE
pagetable_l1_ttbr1:
	.space	PAGE_SIZE
pagetable_l0_ttbr1:
	.space	PAGE_SIZE
pagetable_l2_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l1_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l0_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l0_ttbr0:
	.space	PAGE_SIZE
pagetable_end:

el2_pagetable:
	.space	PAGE_SIZE

	.section .rodata, "a", %progbits
	.globl	aarch32_sigcode
	.align 2
aarch32_sigcode:
	.word 0xe1a0000d	// mov r0, sp
	.word 0xe2800040	// add r0, r0, #SIGF_UC
	.word 0xe59f700c	// ldr r7, [pc, #12]
	.word 0xef000000	// swi #0
	.word 0xe59f7008	// ldr r7, [pc, #8]
	.word 0xef000000	// swi #0
	.word 0xeafffffa	// b . - 16
	.word SYS_sigreturn
	.word SYS_exit
	.align	3
	.size aarch32_sigcode, . - aarch32_sigcode
aarch32_esigcode:
	.data
	.global sz_aarch32_sigcode
sz_aarch32_sigcode:
	.quad aarch32_esigcode - aarch32_sigcode