/*-
 * Copyright (c) 2012-2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "assym.inc"
#include "opt_kstack_pages.h"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/hypervisor.h>
#include <machine/param.h>
#include <machine/pte.h>
#include <machine/vm.h>
#include <machine/vmparam.h>

#define	VIRT_BITS	48

#if PAGE_SIZE == PAGE_SIZE_16K
/*
 * The number of level 3 tables to create. 32 will allow for 1G of address
 * space, the same as a single level 2 page with 4k pages.
 */
#define	L3_PAGE_COUNT	32
#endif

	.globl	kernbase
	.set	kernbase, KERNBASE

/*
 * We assume:
 *  MMU      on with an identity map, or off
 *  D-Cache: off
 *  I-Cache: on or off
 *  We are loaded at a 2MiB aligned address
 */

ENTRY(_start)
	/* Drop to EL1 */
	bl	drop_to_el1

	/*
	 * Disable the MMU. We may have entered the kernel with it on and
	 * will need to update the tables later. If this has been set up
	 * with anything other than a VA == PA map then this will fail,
	 * but in this case the code to find where we are running from
	 * would have also failed.
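	 *
	 * (In the common case the firmware or loader enters here through an
	 * identity map, so clearing SCTLR_EL1.M does not change which
	 * instruction is fetched next.)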
	 */
	dsb	sy
	mrs	x2, sctlr_el1
	bic	x2, x2, SCTLR_M
	msr	sctlr_el1, x2
	isb

	/* Set the context id */
	msr	contextidr_el1, xzr

	/* Get the virt -> phys offset */
	bl	get_virt_delta

	/*
	 * At this point:
	 * x29 = PA - VA
	 * x28 = Our physical load address
	 */

	/* Create the page tables */
	bl	create_pagetables

	/*
	 * At this point:
	 * x27 = TTBR0 table
	 * x26 = Kernel L1 table
	 * x24 = TTBR1 table
	 */

	/* Enable the mmu */
	bl	start_mmu

	/* Load the new ttbr0 pagetable */
	adrp	x27, pagetable_l0_ttbr0
	add	x27, x27, :lo12:pagetable_l0_ttbr0

	/* Jump to the virtual address space */
	ldr	x15, .Lvirtdone
	br	x15

virtdone:
	/* Set up the stack */
	adrp	x25, initstack_end
	add	x25, x25, :lo12:initstack_end
	mov	sp, x25
	sub	sp, sp, #PCB_SIZE

	/* Zero the BSS */
	ldr	x15, .Lbss
	ldr	x14, .Lend
1:
	str	xzr, [x15], #8
	cmp	x15, x14
	b.lo	1b

#if defined(PERTHREAD_SSP)
	/* Set sp_el0 to the boot canary for early per-thread SSP to work */
	adrp	x15, boot_canary
	add	x15, x15, :lo12:boot_canary
	msr	sp_el0, x15
#endif

	/* Backup the module pointer */
	mov	x1, x0

	sub	sp, sp, #BOOTPARAMS_SIZE
	mov	x0, sp

	/* Negate the delta so it is VA -> PA */
	neg	x29, x29

	str	x1, [x0, #BP_MODULEP]
	str	x29, [x0, #BP_KERN_DELTA]
	adrp	x25, initstack
	add	x25, x25, :lo12:initstack
	str	x25, [x0, #BP_KERN_STACK]
	str	x27, [x0, #BP_KERN_TTBR0]
	str	x23, [x0, #BP_BOOT_EL]
	str	x4, [x0, #BP_HCR_EL2]

#ifdef KASAN
	/* Save bootparams */
	mov	x19, x0

	/* Bootstrap an early shadow map for the boot stack. */
	bl	pmap_san_bootstrap

	/* Restore bootparams */
	mov	x0, x19
#endif

	/* trace back starts here */
	mov	fp, #0
	/* Branch to C code */
	bl	initarm
	/* We are done with the boot params */
	add	sp, sp, #BOOTPARAMS_SIZE

	/*
	 * Enable pointer authentication in the kernel. We set the keys for
	 * thread0 in initarm so have to wait until it returns to enable it.
	 * If we were to enable it in initarm then any authentication when
	 * returning would fail as it was called with pointer authentication
	 * disabled.
	 */
	bl	ptrauth_start

	bl	mi_startup

	/* We should not get here */
	brk	0

	.align 3
.Lvirtdone:
	.quad	virtdone
.Lbss:
	.quad	__bss_start
.Lend:
	.quad	__bss_end
END(_start)

#ifdef SMP
/*
 * mpentry(unsigned long)
 *
 * Called by a core when it is being brought online.
 * The data in x0 is passed straight to init_secondary.
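 *
 * Unlike _start, no page tables are built here: the boot CPU has already
 * created them, so this path only drops to EL1, installs the existing
 * tables, enables the MMU and jumps to the virtual address space.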
 */
ENTRY(mpentry)
	/* Disable interrupts */
	msr	daifset, #DAIF_INTR

	/* Drop to EL1 */
	bl	drop_to_el1

	/* Set the context id */
	msr	contextidr_el1, xzr

	/* Load the kernel page table */
	adrp	x24, pagetable_l0_ttbr1
	add	x24, x24, :lo12:pagetable_l0_ttbr1
	/* Load the identity page table */
	adrp	x27, pagetable_l0_ttbr0_bootstrap
	add	x27, x27, :lo12:pagetable_l0_ttbr0_bootstrap

	/* Enable the mmu */
	bl	start_mmu

	/* Load the new ttbr0 pagetable */
	adrp	x27, pagetable_l0_ttbr0
	add	x27, x27, :lo12:pagetable_l0_ttbr0

	/* Jump to the virtual address space */
	ldr	x15, =mp_virtdone
	br	x15

mp_virtdone:
	/* Start using the AP boot stack */
	ldr	x4, =bootstack
	ldr	x4, [x4]
	mov	sp, x4

#if defined(PERTHREAD_SSP)
	/* Set sp_el0 to the boot canary for early per-thread SSP to work */
	adrp	x15, boot_canary
	add	x15, x15, :lo12:boot_canary
	msr	sp_el0, x15
#endif

	/* Load the kernel ttbr0 pagetable */
	msr	ttbr0_el1, x27
	isb

	/* Invalidate the TLB */
	tlbi	vmalle1
	dsb	sy
	isb

	b	init_secondary
END(mpentry)
#endif

/*
 * If we are started in EL2, configure the required hypervisor
 * registers and drop to EL1.
 */
LENTRY(drop_to_el1)
	mrs	x23, CurrentEL
	lsr	x23, x23, #2
	cmp	x23, #0x2
	b.eq	1f
	ret
1:
	/*
	 * Disable the MMU. If the HCR_EL2.E2H field is set we will clear it
	 * which may break address translation.
	 */
	dsb	sy
	mrs	x2, sctlr_el2
	bic	x2, x2, SCTLR_M
	msr	sctlr_el2, x2
	isb

	/* Configure the Hypervisor */
	ldr	x2, =(HCR_RW | HCR_APK | HCR_API)
	msr	hcr_el2, x2

	/* Stash value of HCR_EL2 for later */
	isb
	mrs	x4, hcr_el2

	/* Load the Virtualization Process ID Register */
	mrs	x2, midr_el1
	msr	vpidr_el2, x2

	/* Load the Virtualization Multiprocessor ID Register */
	mrs	x2, mpidr_el1
	msr	vmpidr_el2, x2

	/* Set the bits that need to be 1 in sctlr_el1 */
	ldr	x2, .Lsctlr_res1
	msr	sctlr_el1, x2

	/*
	 * On some hardware, e.g. Apple M1, we can't clear E2H, so make sure we
	 * don't trap to EL2 for SIMD register usage to have at least a
	 * minimally usable system.
	 */
	tst	x4, #HCR_E2H
	mov	x3, #CPTR_RES1	/* HCR_E2H == 0 */
	mov	x5, #CPTR_FPEN	/* HCR_E2H == 1 */
	csel	x2, x3, x5, eq
	msr	cptr_el2, x2

	/* Don't trap to EL2 for CP15 traps */
	msr	hstr_el2, xzr

	/* Enable access to the physical timers at EL1 */
	mrs	x2, cnthctl_el2
	orr	x2, x2, #(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
	msr	cnthctl_el2, x2

	/* Set the counter offset to a known value */
	msr	cntvoff_el2, xzr

	/* Hypervisor trap functions */
	adrp	x2, hyp_stub_vectors
	add	x2, x2, :lo12:hyp_stub_vectors
	msr	vbar_el2, x2

	/* Zero vttbr_el2 so a hypervisor can tell the host and guest apart */
	msr	vttbr_el2, xzr

	mov	x2, #(PSR_DAIF | PSR_M_EL1h)
	msr	spsr_el2, x2

	/* Configure GICv3 CPU interface */
	mrs	x2, id_aa64pfr0_el1
	/* Extract GIC bits from the register */
	ubfx	x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS
	/* GIC[3:0] == 0001 - GIC CPU interface via special regs supported */
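	/*
	 * Only touch ICC_SRE_EL2 when ID_AA64PFR0_EL1.GIC reports that the
	 * GIC system register interface is implemented; otherwise skip
	 * straight to the eret path below.
	 */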
	cmp	x2, #(ID_AA64PFR0_GIC_CPUIF_EN >> ID_AA64PFR0_GIC_SHIFT)
	b.ne	2f

	mrs	x2, icc_sre_el2
	orr	x2, x2, #ICC_SRE_EL2_EN	/* Enable access from insecure EL1 */
	orr	x2, x2, #ICC_SRE_EL2_SRE	/* Enable system registers */
	msr	icc_sre_el2, x2
2:

	/* Set the address to return to our return address */
	msr	elr_el2, x30
	isb

	eret

	.align 3
.Lsctlr_res1:
	.quad	SCTLR_RES1
LEND(drop_to_el1)

/*
 * Get the delta between the physical address we were loaded to and the
 * virtual address we expect to run from. This is used when building the
 * initial page table.
 */
LENTRY(get_virt_delta)
	/* Load the physical address of virt_map */
	adrp	x29, virt_map
	add	x29, x29, :lo12:virt_map
	/* Load the virtual address of virt_map stored in virt_map */
	ldr	x28, [x29]
	/* Find PA - VA as PA' = VA' - VA + PA = VA' + (PA - VA) = VA' + x29 */
	sub	x29, x29, x28
	/* Find the load address for the kernel */
	mov	x28, #(KERNBASE)
	add	x28, x28, x29
	ret

	.align 3
virt_map:
	.quad	virt_map
LEND(get_virt_delta)

/*
 * This builds the page tables containing the identity map, and the kernel
 * virtual map.
 *
 * It relies on:
 *  We were loaded to an address that is on a 2MiB boundary
 *  All the memory must not cross a 1GiB boundary
 *  x28 contains the physical address we were loaded from
 *
 * TODO: This is out of date.
 *  There are at least 5 pages before that address for the page tables
 *   The pages used are:
 *    - The Kernel L2 table
 *    - The Kernel L1 table
 *    - The Kernel L0 table             (TTBR1)
 *    - The identity (PA = VA) L1 table
 *    - The identity (PA = VA) L0 table (TTBR0)
 */
LENTRY(create_pagetables)
	/* Save the Link register */
	mov	x5, x30

	/* Clean the page table */
	adrp	x6, pagetable
	add	x6, x6, :lo12:pagetable
	mov	x26, x6
	adrp	x27, pagetable_end
	add	x27, x27, :lo12:pagetable_end
1:
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	cmp	x6, x27
	b.lo	1b

	/*
	 * Build the TTBR1 maps.
	 */

	/* Find the size of the kernel */
	mov	x6, #(KERNBASE)

#if defined(LINUX_BOOT_ABI)
	/* X19 is used as 'map FDT data' flag */
	mov	x19, xzr

	/* No modules or FDT pointer ? */
	cbz	x0, booti_no_fdt

	/*
	 * Test if x0 points to modules descriptor (virtual address) or
	 * to FDT (physical address)
	 */
	cmp	x0, x6		/* x6 is #(KERNBASE) */
	b.lo	booti_fdt
#endif

	/* Booted with modules pointer */
	/* Find modulep - begin */
	sub	x8, x0, x6
	/*
	 * Add space for the module data. When PAGE_SIZE is 4k this will
	 * add at least 2 level 2 blocks (2 * 2MiB). When PAGE_SIZE is
	 * larger it will be at least as large as we use smaller level 3
	 * pages.
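	 *
	 * Adding (6MiB - 1) before the later shift by L2_SHIFT (or
	 * L3_SHIFT) rounds the entry count up and leaves headroom for the
	 * module metadata. As an illustrative example, a 10MiB kernel
	 * plus (6MiB - 1), shifted right by L2_SHIFT (21), gives 7
	 * entries, i.e. 14MiB of mapping.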
	 */
	ldr	x7, =((6 * 1024 * 1024) - 1)
	add	x8, x8, x7
	b	common

#if defined(LINUX_BOOT_ABI)
booti_fdt:
	/* Booted by U-Boot booti with FDT data */
	/* Set 'map FDT data' flag */
	mov	x19, #1

booti_no_fdt:
	/* Booted by U-Boot booti without FDT data */
	/* Find the end - begin */
	ldr	x7, .Lend
	sub	x8, x7, x6

	/*
	 * Add one 2MiB page for copy of FDT data (maximum FDT size),
	 * one for metadata and round up
	 */
	ldr	x7, =(3 * L2_SIZE - 1)
	add	x8, x8, x7
#endif

common:
#if PAGE_SIZE != PAGE_SIZE_4K
	/*
	 * Create L3 pages. The kernel will be loaded at a 2M aligned
	 * address, but when the page size is larger than 4k, L2 blocks
	 * are too large to map the kernel with only that alignment.
	 */

	/* Get the number of l3 pages to allocate, rounded down */
	lsr	x10, x8, #(L3_SHIFT)

	/* Create the kernel space L2 table */
	mov	x6, x26
	mov	x7, #(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	mov	x8, #(KERNBASE)
	mov	x9, x28
	bl	build_l3_page_pagetable

	/* Move to the l2 table */
	ldr	x9, =(PAGE_SIZE * L3_PAGE_COUNT)
	add	x26, x26, x9

	/* Link the l2 -> l3 table */
	mov	x9, x6
	mov	x6, x26
	bl	link_l2_pagetable
#else
	/* Get the number of l2 pages to allocate, rounded down */
	lsr	x10, x8, #(L2_SHIFT)

	/* Create the kernel space L2 table */
	mov	x6, x26
	mov	x7, #(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	mov	x8, #(KERNBASE)
	mov	x9, x28
	bl	build_l2_block_pagetable
#endif

	/* Move to the l1 table */
	add	x26, x26, #PAGE_SIZE

	/* Link the l1 -> l2 table */
	mov	x9, x6
	mov	x6, x26
	bl	link_l1_pagetable

	/* Move to the l0 table */
	add	x24, x26, #PAGE_SIZE

	/* Link the l0 -> l1 table */
	mov	x9, x6
	mov	x6, x24
	mov	x10, #1
	bl	link_l0_pagetable

	/*
	 * Build the TTBR0 maps. As TTBR0 maps, they must specify ATTR_S1_nG.
	 * They are only needed early on, so the VA = PA map only covers the
	 * blocks needed to finish booting.
	 */
	add	x27, x24, #PAGE_SIZE

	mov	x6, x27		/* The initial page table */

	/* Create the VA = PA map */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	adrp	x16, _start
	and	x16, x16, #(~L2_OFFSET)
	mov	x9, x16		/* PA start */
	mov	x8, x16		/* VA start (== PA start) */
	mov	x10, #1
	bl	build_l2_block_pagetable

#if defined(SOCDEV_PA)
	/* Create a table for the UART */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_DEVICE))
	ldr	x9, =(L2_SIZE)
	add	x16, x16, x9	/* VA start */
	mov	x8, x16

	/* Store the socdev virtual address */
	add	x17, x8, #(SOCDEV_PA & L2_OFFSET)
	adrp	x9, socdev_va
	str	x17, [x9, :lo12:socdev_va]

	mov	x9, #(SOCDEV_PA & ~L2_OFFSET)	/* PA start */
	mov	x10, #1
	bl	build_l2_block_pagetable
#endif

#if defined(LINUX_BOOT_ABI)
	/* Map FDT data ? */
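	/*
	 * x19 was set to 1 above only when x0 held a physical FDT pointer
	 * (the booti path); in that case map one 2MiB block for the FDT
	 * and rewrite x0 to the matching virtual address.
	 */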
	cbz	x19, 1f

	/* Create the mapping for FDT data (2 MiB max) */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	ldr	x9, =(L2_SIZE)
	add	x16, x16, x9	/* VA start */
	mov	x8, x16
	mov	x9, x0			/* PA start */
	/* Update the module pointer to point at the allocated memory */
	and	x0, x0, #(L2_OFFSET)	/* Keep the lower bits */
	add	x0, x0, x8		/* Add the aligned virtual address */

	mov	x10, #1
	bl	build_l2_block_pagetable

1:
#endif

	/* Move to the l1 table */
	add	x27, x27, #PAGE_SIZE

	/* Link the l1 -> l2 table */
	mov	x9, x6
	mov	x6, x27
	bl	link_l1_pagetable

	/* Move to the l0 table */
	add	x27, x27, #PAGE_SIZE

	/* Link the l0 -> l1 table */
	mov	x9, x6
	mov	x6, x27
	mov	x10, #1
	bl	link_l0_pagetable

	/* Restore the Link register */
	mov	x30, x5
	ret
LEND(create_pagetables)

/*
 * Builds an L0 -> L1 table descriptor
 *
 *  x6  = L0 table
 *  x8  = Virtual Address
 *  x9  = L1 PA (trashed)
 *  x10 = Entry count (trashed)
 *  x11, x12 and x13 are trashed
 */
LENTRY(link_l0_pagetable)
	/*
	 * Link an L0 -> L1 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L0_SHIFT
	and	x11, x11, #L0_ADDR_MASK

	/* Build the L0 table entry */
	mov	x12, #L0_TABLE
	orr	x12, x12, #(TATTR_UXN_TABLE | TATTR_AP_TABLE_NO_EL0)

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
1:	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(link_l0_pagetable)

/*
 * Builds an L1 -> L2 table descriptor
 *
 *  x6  = L1 table
 *  x8  = Virtual Address
 *  x9  = L2 PA (trashed)
 *  x11, x12 and x13 are trashed
 */
LENTRY(link_l1_pagetable)
	/*
	 * Link an L1 -> L2 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L1_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L1 table entry */
	mov	x12, #L1_TABLE

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	ret
LEND(link_l1_pagetable)

/*
 * Builds count 2 MiB block page table entries
 *  x6  = L2 table
 *  x7  = Block attributes
 *  x8  = VA start
 *  x9  = PA start (trashed)
 *  x10 = Entry count (trashed)
 *  x11, x12 and x13 are trashed
 */
LENTRY(build_l2_block_pagetable)
	/*
	 * Build the L2 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L2_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L2 block entry */
	orr	x12, x7, #L2_BLOCK
	orr	x12, x12, #(ATTR_DEFAULT)
	orr	x12, x12, #(ATTR_S1_UXN)

	/* Only use the output address bits */
	lsr	x9, x9, #L2_SHIFT

	/* Set the physical address for this virtual address */
1:	orr	x13, x12, x9, lsl #L2_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(build_l2_block_pagetable)

#if PAGE_SIZE != PAGE_SIZE_4K
/*
 * Builds an L2 -> L3 table descriptor
 *
 *  x6  = L2 table
 *  x8  = Virtual Address
 *  x9  = L3 PA (trashed)
 *  x11, x12 and x13 are trashed
 */
LENTRY(link_l2_pagetable)
	/*
	 * Link an L2 -> L3 table entry.
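	 *
	 * This is a table descriptor, not a block mapping, so the memory
	 * attributes of the final mappings come from the L3 page entries
	 * built in build_l3_page_pagetable.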
715 */ 716 /* Find the table index */ 717 lsr x11, x8, #L2_SHIFT 718 and x11, x11, #Ln_ADDR_MASK 719 720 /* Build the L1 block entry */ 721 mov x12, #L2_TABLE 722 723 /* Only use the output address bits */ 724 lsr x9, x9, #PAGE_SHIFT 725 orr x13, x12, x9, lsl #PAGE_SHIFT 726 727 /* Store the entry */ 728 str x13, [x6, x11, lsl #3] 729 730 ret 731LEND(link_l2_pagetable) 732 733/* 734 * Builds count level 3 page table entries 735 * x6 = L3 table 736 * x7 = Block attributes 737 * x8 = VA start 738 * x9 = PA start (trashed) 739 * x10 = Entry count (trashed) 740 * x11, x12 and x13 are trashed 741 */ 742LENTRY(build_l3_page_pagetable) 743 /* 744 * Build the L3 table entry. 745 */ 746 /* Find the table index */ 747 lsr x11, x8, #L3_SHIFT 748 and x11, x11, #Ln_ADDR_MASK 749 750 /* Build the L3 page entry */ 751 orr x12, x7, #L3_PAGE 752 orr x12, x12, #(ATTR_DEFAULT) 753 orr x12, x12, #(ATTR_S1_UXN) 754 755 /* Only use the output address bits */ 756 lsr x9, x9, #L3_SHIFT 757 758 /* Set the physical address for this virtual address */ 7591: orr x13, x12, x9, lsl #L3_SHIFT 760 761 /* Store the entry */ 762 str x13, [x6, x11, lsl #3] 763 764 sub x10, x10, #1 765 add x11, x11, #1 766 add x9, x9, #1 767 cbnz x10, 1b 768 769 ret 770LEND(build_l3_page_pagetable) 771#endif 772 773LENTRY(start_mmu) 774 dsb sy 775 776 /* Load the exception vectors */ 777 ldr x2, =exception_vectors 778 msr vbar_el1, x2 779 780 /* Load ttbr0 and ttbr1 */ 781 msr ttbr0_el1, x27 782 msr ttbr1_el1, x24 783 isb 784 785 /* Clear the Monitor Debug System control register */ 786 msr mdscr_el1, xzr 787 788 /* Invalidate the TLB */ 789 tlbi vmalle1is 790 dsb ish 791 isb 792 793 ldr x2, mair 794 msr mair_el1, x2 795 796 /* 797 * Setup TCR according to the PARange and ASIDBits fields 798 * from ID_AA64MMFR0_EL1 and the HAFDBS field from the 799 * ID_AA64MMFR1_EL1. More precisely, set TCR_EL1.AS 800 * to 1 only if the ASIDBits field equals 0b0010. 801 */ 802 ldr x2, tcr 803 mrs x3, id_aa64mmfr0_el1 804 805 /* Copy the bottom 3 bits from id_aa64mmfr0_el1 into TCR.IPS */ 806 bfi x2, x3, #(TCR_IPS_SHIFT), #(TCR_IPS_WIDTH) 807 and x3, x3, #(ID_AA64MMFR0_ASIDBits_MASK) 808 809 /* Check if the HW supports 16 bit ASIDS */ 810 cmp x3, #(ID_AA64MMFR0_ASIDBits_16) 811 /* If so x3 == 1, else x3 == 0 */ 812 cset x3, eq 813 /* Set TCR.AS with x3 */ 814 bfi x2, x3, #(TCR_ASID_SHIFT), #(TCR_ASID_WIDTH) 815 816 /* 817 * Check if the HW supports access flag and dirty state updates, 818 * and set TCR_EL1.HA and TCR_EL1.HD accordingly. 819 */ 820 mrs x3, id_aa64mmfr1_el1 821 and x3, x3, #(ID_AA64MMFR1_HAFDBS_MASK) 822 cmp x3, #1 823 b.ne 1f 824 orr x2, x2, #(TCR_HA) 825 b 2f 8261: 827 cmp x3, #2 828 b.ne 2f 829 orr x2, x2, #(TCR_HA | TCR_HD) 8302: 831 msr tcr_el1, x2 832 833 /* 834 * Setup SCTLR. 
	 */
	ldr	x2, sctlr_set
	ldr	x3, sctlr_clear
	mrs	x1, sctlr_el1
	bic	x1, x1, x3	/* Clear the required bits */
	orr	x1, x1, x2	/* Set the required bits */
	msr	sctlr_el1, x1
	isb

	ret

	.align 3
mair:
	.quad	MAIR_ATTR(MAIR_DEVICE_nGnRnE, VM_MEMATTR_DEVICE_nGnRnE) |	\
		MAIR_ATTR(MAIR_NORMAL_NC, VM_MEMATTR_UNCACHEABLE) |		\
		MAIR_ATTR(MAIR_NORMAL_WB, VM_MEMATTR_WRITE_BACK) |		\
		MAIR_ATTR(MAIR_NORMAL_WT, VM_MEMATTR_WRITE_THROUGH) |		\
		MAIR_ATTR(MAIR_DEVICE_nGnRE, VM_MEMATTR_DEVICE_nGnRE)
tcr:
#if PAGE_SIZE == PAGE_SIZE_4K
#define	TCR_TG	(TCR_TG1_4K | TCR_TG0_4K)
#elif PAGE_SIZE == PAGE_SIZE_16K
#define	TCR_TG	(TCR_TG1_16K | TCR_TG0_16K)
#else
#error Unsupported page size
#endif

	.quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG |	\
	    TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
sctlr_set:
	/* Bits to set */
	.quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN |	\
	    SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE |		\
	    SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C |	\
	    SCTLR_M | SCTLR_CP15BEN)
sctlr_clear:
	/* Bits to clear */
	.quad (SCTLR_EE | SCTLR_E0E | SCTLR_IESB | SCTLR_WXN | SCTLR_UMA |	\
	    SCTLR_ITD | SCTLR_A)
LEND(start_mmu)

ENTRY(abort)
	b abort
END(abort)

.bss
	.align	PAGE_SHIFT
initstack:
	.space	(PAGE_SIZE * KSTACK_PAGES)
initstack_end:

	.section .init_pagetable, "aw", %nobits
	.align PAGE_SHIFT
	/*
	 * 7 initial tables (in the following order):
	 *  L2 for kernel (High addresses)
	 *  L1 for kernel
	 *  L0 for kernel
	 *  L2 bootstrap for user (Low addresses)
	 *  L1 bootstrap for user
	 *  L0 bootstrap for user
	 *  L0 for user
	 */
	.globl pagetable_l0_ttbr1
pagetable:
#if PAGE_SIZE != PAGE_SIZE_4K
	.space	(PAGE_SIZE * L3_PAGE_COUNT)
pagetable_l2_ttbr1:
#endif
	.space	PAGE_SIZE
pagetable_l1_ttbr1:
	.space	PAGE_SIZE
pagetable_l0_ttbr1:
	.space	PAGE_SIZE
pagetable_l2_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l1_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l0_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l0_ttbr0:
	.space	PAGE_SIZE
pagetable_end:

el2_pagetable:
	.space	PAGE_SIZE

	.section .rodata, "a", %progbits
	.globl	aarch32_sigcode
	.align 2
aarch32_sigcode:
	.word 0xe1a0000d	// mov r0, sp
	.word 0xe2800040	// add r0, r0, #SIGF_UC
	.word 0xe59f700c	// ldr r7, [pc, #12]
	.word 0xef000000	// swi #0
	.word 0xe59f7008	// ldr r7, [pc, #8]
	.word 0xef000000	// swi #0
	.word 0xeafffffa	// b . - 16
	.word SYS_sigreturn
	.word SYS_exit
	.align	3
	.size aarch32_sigcode, . - aarch32_sigcode
aarch32_esigcode:
	.data
	.global sz_aarch32_sigcode
sz_aarch32_sigcode:
	.quad aarch32_esigcode - aarch32_sigcode