/*-
 * Copyright (c) 2012-2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "assym.inc"
#include "opt_kstack_pages.h"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/hypervisor.h>
#include <machine/param.h>
#include <machine/pte.h>
#include <machine/vm.h>
#include <machine/vmparam.h>

#define	VIRT_BITS	48

#if PAGE_SIZE == PAGE_SIZE_16K
/*
 * The number of level 3 tables to create. 32 will allow for 1G of address
 * space, the same as a single level 2 page with 4k pages.
 */
#define	L3_PAGE_COUNT	32
#endif

	.globl	kernbase
	.set	kernbase, KERNBASE

/*
 * We assume:
 *  MMU      on with an identity map, or off
 *  D-Cache: off
 *  I-Cache: on or off
 *  We are loaded at a 2MiB aligned address
 */

ENTRY(_start)
	/* Drop to EL1 */
	bl	drop_to_el1

	/*
	 * Disable the MMU. We may have entered the kernel with it on and
	 * will need to update the tables later. If this has been set up
	 * with anything other than a VA == PA map then this will fail,
	 * but in this case the code to find where we are running from
	 * would have also failed.
	 */
	dsb	sy
	mrs	x2, sctlr_el1
	bic	x2, x2, SCTLR_M
	msr	sctlr_el1, x2
	isb

	/* Set the context id */
	msr	contextidr_el1, xzr

	/* Get the virt -> phys offset */
	bl	get_virt_delta

	/*
	 * At this point:
	 * x29 = PA - VA
	 * x28 = Our physical load address
	 */
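	/*
	 * Illustrative example of the delta (the concrete addresses are
	 * assumptions for this comment, not values used by the code). If
	 * the kernel is linked at KERNBASE = 0xffff000000000000 but loaded
	 * at physical address 0x40200000 then, modulo 2^64:
	 *
	 *	x29 = PA - VA = 0x40200000 - 0xffff000000000000
	 *	    = 0x0001000040200000
	 *	x28 = KERNBASE + x29 = 0x40200000
	 *
	 * Until the MMU is enabled, any linked (virtual) kernel address
	 * can therefore be translated with pa = va + x29.
	 */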
	/* Create the page tables */
	bl	create_pagetables

	/*
	 * At this point:
	 * x27 = TTBR0 table
	 * x26 = Kernel L1 table
	 * x24 = TTBR1 table
	 */

	/* Enable the mmu */
	bl	start_mmu

	/* Load the new ttbr0 pagetable */
	adrp	x27, pagetable_l0_ttbr0
	add	x27, x27, :lo12:pagetable_l0_ttbr0

	/* Jump to the virtual address space */
	ldr	x15, .Lvirtdone
	br	x15

virtdone:
	/* Set up the stack */
	adrp	x25, initstack_end
	add	x25, x25, :lo12:initstack_end
	mov	sp, x25
	sub	sp, sp, #PCB_SIZE

	/* Zero the BSS */
	ldr	x15, .Lbss
	ldr	x14, .Lend
1:
	str	xzr, [x15], #8
	cmp	x15, x14
	b.lo	1b

#if defined(PERTHREAD_SSP)
	/* Set sp_el0 to the boot canary for early per-thread SSP to work */
	adrp	x15, boot_canary
	add	x15, x15, :lo12:boot_canary
	msr	sp_el0, x15
#endif

	/* Backup the module pointer */
	mov	x1, x0

	sub	sp, sp, #BOOTPARAMS_SIZE
	mov	x0, sp

	/* Negate the delta so it is VA -> PA */
	neg	x29, x29

	str	x1, [x0, #BP_MODULEP]
	str	x29, [x0, #BP_KERN_DELTA]
	adrp	x25, initstack
	add	x25, x25, :lo12:initstack
	str	x25, [x0, #BP_KERN_STACK]
	str	x27, [x0, #BP_KERN_TTBR0]
	str	x23, [x0, #BP_BOOT_EL]
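	/*
	 * The stores above fill in the struct arm64_bootparams handed to
	 * initarm() below. A rough C sketch of the layout implied by the
	 * generated BP_* offsets (field names and types here are
	 * illustrative; the real definition lives in the machine headers):
	 *
	 *	struct arm64_bootparams {
	 *		vm_offset_t	modulep;	// BP_MODULEP
	 *		vm_offset_t	kern_delta;	// BP_KERN_DELTA
	 *		vm_offset_t	kern_stack;	// BP_KERN_STACK
	 *		vm_paddr_t	kern_ttbr0;	// BP_KERN_TTBR0
	 *		uint64_t	boot_el;	// BP_BOOT_EL
	 *	};
	 */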
	/* trace back starts here */
	mov	fp, #0
	/* Branch to C code */
	bl	initarm
	/* We are done with the boot params */
	add	sp, sp, #BOOTPARAMS_SIZE

	/*
	 * Enable pointer authentication in the kernel. We set the keys for
	 * thread0 in initarm so have to wait until it returns to enable it.
	 * If we were to enable it in initarm then any authentication when
	 * returning would fail as it was called with pointer authentication
	 * disabled.
	 */
	bl	ptrauth_start

	bl	mi_startup

	/* We should not get here */
	brk	0

	.align 3
.Lvirtdone:
	.quad	virtdone
.Lbss:
	.quad	__bss_start
.Lend:
	.quad	__bss_end
END(_start)

#ifdef SMP
/*
 * mpentry(unsigned long)
 *
 * Called by a core when it is being brought online.
 * The data in x0 is passed straight to init_secondary.
 */
ENTRY(mpentry)
	/* Disable interrupts */
	msr	daifset, #DAIF_INTR

	/* Drop to EL1 */
	bl	drop_to_el1

	/* Set the context id */
	msr	contextidr_el1, xzr

	/* Load the kernel page table */
	adrp	x24, pagetable_l0_ttbr1
	add	x24, x24, :lo12:pagetable_l0_ttbr1
	/* Load the identity page table */
	adrp	x27, pagetable_l0_ttbr0_bootstrap
	add	x27, x27, :lo12:pagetable_l0_ttbr0_bootstrap

	/* Enable the mmu */
	bl	start_mmu

	/* Load the new ttbr0 pagetable */
	adrp	x27, pagetable_l0_ttbr0
	add	x27, x27, :lo12:pagetable_l0_ttbr0

	/* Jump to the virtual address space */
	ldr	x15, =mp_virtdone
	br	x15

mp_virtdone:
	/* Start using the AP boot stack */
	ldr	x4, =bootstack
	ldr	x4, [x4]
	mov	sp, x4

#if defined(PERTHREAD_SSP)
	/* Set sp_el0 to the boot canary for early per-thread SSP to work */
	adrp	x15, boot_canary
	add	x15, x15, :lo12:boot_canary
	msr	sp_el0, x15
#endif

	/* Load the kernel ttbr0 pagetable */
	msr	ttbr0_el1, x27
	isb

	/* Invalidate the TLB */
	tlbi	vmalle1
	dsb	sy
	isb

	b	init_secondary
END(mpentry)
#endif

/*
 * If we are started in EL2, configure the required hypervisor
 * registers and drop to EL1.
 */
LENTRY(drop_to_el1)
	mrs	x23, CurrentEL
	lsr	x23, x23, #2
	cmp	x23, #0x2
	b.eq	1f
	ret
1:
	/*
	 * Disable the MMU. If the HCR_EL2.E2H field is set we will clear it
	 * which may break address translation.
	 */
	dsb	sy
	mrs	x2, sctlr_el2
	bic	x2, x2, SCTLR_M
	msr	sctlr_el2, x2
	isb

	/* Configure the Hypervisor */
	ldr	x2, =(HCR_RW | HCR_APK | HCR_API)
	msr	hcr_el2, x2

	/* Load the Virtualization Process ID Register */
	mrs	x2, midr_el1
	msr	vpidr_el2, x2

	/* Load the Virtualization Multiprocess ID Register */
	mrs	x2, mpidr_el1
	msr	vmpidr_el2, x2

	/* Set the bits that need to be 1 in sctlr_el1 */
	ldr	x2, .Lsctlr_res1
	msr	sctlr_el1, x2

	/* Don't trap to EL2 for exceptions */
	mov	x2, #CPTR_RES1
	msr	cptr_el2, x2

	/* Don't trap to EL2 for CP15 traps */
	msr	hstr_el2, xzr

	/* Enable access to the physical timers at EL1 */
	mrs	x2, cnthctl_el2
	orr	x2, x2, #(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
	msr	cnthctl_el2, x2

	/* Set the counter offset to a known value */
	msr	cntvoff_el2, xzr

	/* Hypervisor trap functions */
	adrp	x2, hyp_stub_vectors
	add	x2, x2, :lo12:hyp_stub_vectors
	msr	vbar_el2, x2

	/* Zero vttbr_el2 so a hypervisor can tell the host and guest apart */
	msr	vttbr_el2, xzr

	mov	x2, #(PSR_F | PSR_I | PSR_A | PSR_D | PSR_M_EL1h)
	msr	spsr_el2, x2

	/* Configure GICv3 CPU interface */
	mrs	x2, id_aa64pfr0_el1
	/* Extract GIC bits from the register */
	ubfx	x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS
	/* GIC[3:0] == 0001 - GIC CPU interface via special regs. supported */
	cmp	x2, #(ID_AA64PFR0_GIC_CPUIF_EN >> ID_AA64PFR0_GIC_SHIFT)
	b.ne	2f

	mrs	x2, icc_sre_el2
	orr	x2, x2, #ICC_SRE_EL2_EN	/* Enable access from Non-secure EL1 */
	orr	x2, x2, #ICC_SRE_EL2_SRE	/* Enable system registers */
	msr	icc_sre_el2, x2
2:

	/* Set the address to return to our return address */
	msr	elr_el2, x30
	isb

	eret

	.align 3
.Lsctlr_res1:
	.quad SCTLR_RES1
LEND(drop_to_el1)

/*
 * Get the delta between the physical address we were loaded to and the
 * virtual address we expect to run from. This is used when building the
 * initial page table.
 */
LENTRY(get_virt_delta)
	/* Load the physical address of virt_map */
	adrp	x29, virt_map
	add	x29, x29, :lo12:virt_map
	/* Load the virtual address of virt_map stored in virt_map */
	ldr	x28, [x29]
	/* Find PA - VA, the offset between our load and link addresses */
	sub	x29, x29, x28
	/* Find the load address for the kernel */
	mov	x28, #(KERNBASE)
	add	x28, x28, x29
	ret

	.align 3
virt_map:
	.quad	virt_map
LEND(get_virt_delta)
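/*
 * The tables built by create_pagetables below implement the standard
 * ARMv8 4-level translation walk. For the 4K granule with
 * VIRT_BITS == 48 the index bits are (a sketch of the architecture,
 * for orientation; the 16K granule splits the VA differently):
 *
 *	VA[47:39]  L0 index - each entry covers 512GiB, points to an L1 table
 *	VA[38:30]  L1 index - each entry covers 1GiB, points to an L2 table
 *	VA[29:21]  L2 index - 2MiB block mapping, or pointer to an L3 table
 *	VA[20:12]  L3 index - 4KiB page mapping
 */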
/*
 * This builds the page tables containing the identity map, and the kernel
 * virtual map.
 *
 * It relies on:
 *  We were loaded to an address that is on a 2MiB boundary
 *  None of this memory may cross a 1GiB boundary
 *  x28 contains the physical address we were loaded from
 *
 * TODO: This is out of date.
 *  There are at least 5 pages before that address for the page tables
 *   The pages used are:
 *   - The Kernel L2 table
 *   - The Kernel L1 table
 *   - The Kernel L0 table             (TTBR1)
 *   - The identity (PA = VA) L1 table
 *   - The identity (PA = VA) L0 table (TTBR0)
 */
LENTRY(create_pagetables)
	/* Save the Link register */
	mov	x5, x30

	/* Clean the page table */
	adrp	x6, pagetable
	add	x6, x6, :lo12:pagetable
	mov	x26, x6
	adrp	x27, pagetable_end
	add	x27, x27, :lo12:pagetable_end
1:
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	cmp	x6, x27
	b.lo	1b

	/*
	 * Build the TTBR1 maps.
	 */

	/* Find the size of the kernel */
	mov	x6, #(KERNBASE)

#if defined(LINUX_BOOT_ABI)
	/* X19 is used as 'map FDT data' flag */
	mov	x19, xzr

	/* No modules or FDT pointer ? */
	cbz	x0, booti_no_fdt

	/*
	 * Test if x0 points to modules descriptor (virtual address) or
	 * to FDT (physical address)
	 */
	cmp	x0, x6		/* x6 is #(KERNBASE) */
	b.lo	booti_fdt
#endif

	/* Booted with modules pointer */
	/* Find modulep - begin */
	sub	x8, x0, x6
	/*
	 * Add space for the module data. When PAGE_SIZE is 4k this will
	 * add at least 2 level 2 blocks (2 * 2MiB). When PAGE_SIZE is
	 * larger it will be at least as large as we use smaller level 3
	 * pages.
	 */
	ldr	x7, =((6 * 1024 * 1024) - 1)
	add	x8, x8, x7
	b	common
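	/*
	 * Example of the round-up above (the sizes are assumed for
	 * illustration): if the module data ends 9MiB past KERNBASE then
	 * x8 = 9MiB + (6MiB - 1), and the shift by L2_SHIFT at "common"
	 * truncates this to seven 2MiB entries. In general the mapping
	 * covers the kernel and modules plus at least 4MiB of headroom
	 * for the module metadata.
	 */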
#if defined(LINUX_BOOT_ABI)
booti_fdt:
	/* Booted by U-Boot booti with FDT data */
	/* Set 'map FDT data' flag */
	mov	x19, #1

booti_no_fdt:
	/* Booted by U-Boot booti without FDT data */
	/* Find the end - begin */
	ldr	x7, .Lend
	sub	x8, x7, x6

	/*
	 * Add one 2MiB page for a copy of the FDT data (maximum FDT size),
	 * one for metadata and round up
	 */
	ldr	x7, =(3 * L2_SIZE - 1)
	add	x8, x8, x7
#endif

common:
#if PAGE_SIZE != PAGE_SIZE_4K
	/*
	 * Create L3 pages. The kernel will be loaded at a 2M aligned
	 * address, but when the page size is larger than 4k, L2 blocks
	 * are too large to map the kernel with only that alignment.
	 */

	/* Get the number of l3 pages to allocate, rounded down */
	lsr	x10, x8, #(L3_SHIFT)

	/* Create the kernel space L2 table */
	mov	x6, x26
	mov	x7, #(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	mov	x8, #(KERNBASE)
	mov	x9, x28
	bl	build_l3_page_pagetable

	/* Move to the l2 table */
	ldr	x9, =(PAGE_SIZE * L3_PAGE_COUNT)
	add	x26, x26, x9

	/* Link the l2 -> l3 table */
	mov	x9, x6
	mov	x6, x26
	bl	link_l2_pagetable
#else
	/* Get the number of l2 pages to allocate, rounded down */
	lsr	x10, x8, #(L2_SHIFT)

	/* Create the kernel space L2 table */
	mov	x6, x26
	mov	x7, #(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	mov	x8, #(KERNBASE)
	mov	x9, x28
	bl	build_l2_block_pagetable
#endif

	/* Move to the l1 table */
	add	x26, x26, #PAGE_SIZE

	/* Link the l1 -> l2 table */
	mov	x9, x6
	mov	x6, x26
	bl	link_l1_pagetable

	/* Move to the l0 table */
	add	x24, x26, #PAGE_SIZE

	/* Link the l0 -> l1 table */
	mov	x9, x6
	mov	x6, x24
	mov	x10, #1
	bl	link_l0_pagetable

	/*
	 * Build the TTBR0 maps. As TTBR0 maps, they must specify ATTR_S1_nG.
	 * They are only needed early on, so the VA = PA map is uncached.
	 */
	add	x27, x24, #PAGE_SIZE

	mov	x6, x27		/* The initial page table */

	/* Create the VA = PA map */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	adrp	x16, _start
	and	x16, x16, #(~L2_OFFSET)
	mov	x9, x16		/* PA start */
	mov	x8, x16		/* VA start (== PA start) */
	mov	x10, #1
	bl	build_l2_block_pagetable

#if defined(SOCDEV_PA)
	/* Create a table for the UART */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_DEVICE))
	ldr	x9, =(L2_SIZE)
	add	x16, x16, x9	/* VA start */
	mov	x8, x16

	/* Store the socdev virtual address */
	add	x17, x8, #(SOCDEV_PA & L2_OFFSET)
	adrp	x9, socdev_va
	str	x17, [x9, :lo12:socdev_va]

	mov	x9, #(SOCDEV_PA & ~L2_OFFSET)	/* PA start */
	mov	x10, #1
	bl	build_l2_block_pagetable
#endif

#if defined(LINUX_BOOT_ABI)
	/* Map FDT data ? */
	cbz	x19, 1f

	/* Create the mapping for FDT data (2 MiB max) */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	ldr	x9, =(L2_SIZE)
	add	x16, x16, x9	/* VA start */
	mov	x8, x16
	mov	x9, x0			/* PA start */
	/* Update the module pointer to point at the allocated memory */
	and	x0, x0, #(L2_OFFSET)	/* Keep the lower bits */
	add	x0, x0, x8		/* Add the aligned virtual address */

	mov	x10, #1
	bl	build_l2_block_pagetable

1:
#endif

	/* Move to the l1 table */
	add	x27, x27, #PAGE_SIZE

	/* Link the l1 -> l2 table */
	mov	x9, x6
	mov	x6, x27
	bl	link_l1_pagetable

	/* Move to the l0 table */
	add	x27, x27, #PAGE_SIZE

	/* Link the l0 -> l1 table */
	mov	x9, x6
	mov	x6, x27
	mov	x10, #1
	bl	link_l0_pagetable

	/* Restore the Link register */
	mov	x30, x5
	ret
LEND(create_pagetables)
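/*
 * For reference, the shape of a 2MiB L2 block entry as assembled by
 * build_l2_block_pagetable below (4K granule; a simplified sketch of
 * the architectural descriptor, not a complete definition):
 *
 *	bits [1:0]    0b01           - block descriptor
 *	bits [4:2]    AttrIndx       - index into MAIR_EL1 (from x7)
 *	bits [9:8]    SH             - shareability (from ATTR_DEFAULT)
 *	bit  [10]     AF             - access flag (from ATTR_DEFAULT)
 *	bits [47:21]  output address - 2MiB-aligned physical address
 *	bit  [54]     UXN            - no execute at EL0
 */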
/*
 * Builds an L0 -> L1 table descriptor
 *
 * x6  = L0 table
 * x8  = Virtual Address
 * x9  = L1 PA (trashed)
 * x10 = Entry count (trashed)
 * x11, x12 and x13 are trashed
 */
LENTRY(link_l0_pagetable)
	/*
	 * Link an L0 -> L1 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L0_SHIFT
	and	x11, x11, #L0_ADDR_MASK

	/* Build the L0 table entry */
	mov	x12, #L0_TABLE
	orr	x12, x12, #(TATTR_UXN_TABLE | TATTR_AP_TABLE_NO_EL0)

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
1:	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(link_l0_pagetable)

/*
 * Builds an L1 -> L2 table descriptor
 *
 * x6 = L1 table
 * x8 = Virtual Address
 * x9 = L2 PA (trashed)
 * x11, x12 and x13 are trashed
 */
LENTRY(link_l1_pagetable)
	/*
	 * Link an L1 -> L2 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L1_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L1 table entry */
	mov	x12, #L1_TABLE

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	ret
LEND(link_l1_pagetable)

/*
 * Builds count 2 MiB page table entries
 *
 * x6  = L2 table
 * x7  = Block attributes
 * x8  = VA start
 * x9  = PA start (trashed)
 * x10 = Entry count (trashed)
 * x11, x12 and x13 are trashed
 */
LENTRY(build_l2_block_pagetable)
	/*
	 * Build the L2 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L2_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L2 block entry */
	orr	x12, x7, #L2_BLOCK
	orr	x12, x12, #(ATTR_DEFAULT)
	orr	x12, x12, #(ATTR_S1_UXN)

	/* Only use the output address bits */
	lsr	x9, x9, #L2_SHIFT

	/* Set the physical address for this virtual address */
1:	orr	x13, x12, x9, lsl #L2_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(build_l2_block_pagetable)
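/*
 * Index arithmetic example for the table routines above (the VA is an
 * assumed example value): for VA 0xffff000000200000 with a 4K granule,
 * (VA >> L2_SHIFT) & Ln_ADDR_MASK = (VA >> 21) & 0x1ff = 1, so
 * build_l2_block_pagetable stores its descriptor at byte offset
 * 1 << 3 = 8 into the L2 table.
 */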
#if PAGE_SIZE != PAGE_SIZE_4K
/*
 * Builds an L2 -> L3 table descriptor
 *
 * x6 = L2 table
 * x8 = Virtual Address
 * x9 = L3 PA (trashed)
 * x11, x12 and x13 are trashed
 */
LENTRY(link_l2_pagetable)
	/*
	 * Link an L2 -> L3 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L2_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L2 table entry */
	mov	x12, #L2_TABLE

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	ret
LEND(link_l2_pagetable)

/*
 * Builds count level 3 page table entries
 *
 * x6  = L3 table
 * x7  = Block attributes
 * x8  = VA start
 * x9  = PA start (trashed)
 * x10 = Entry count (trashed)
 * x11, x12 and x13 are trashed
 */
LENTRY(build_l3_page_pagetable)
	/*
	 * Build the L3 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L3_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L3 page entry */
	orr	x12, x7, #L3_PAGE
	orr	x12, x12, #(ATTR_DEFAULT)
	orr	x12, x12, #(ATTR_S1_UXN)

	/* Only use the output address bits */
	lsr	x9, x9, #L3_SHIFT

	/* Set the physical address for this virtual address */
1:	orr	x13, x12, x9, lsl #L3_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(build_l3_page_pagetable)
#endif

LENTRY(start_mmu)
	dsb	sy

	/* Load the exception vectors */
	ldr	x2, =exception_vectors
	msr	vbar_el1, x2

	/* Load ttbr0 and ttbr1 */
	msr	ttbr0_el1, x27
	msr	ttbr1_el1, x24
	isb

	/* Clear the Monitor Debug System control register */
	msr	mdscr_el1, xzr

	/* Invalidate the TLB */
	tlbi	vmalle1is
	dsb	ish
	isb

	ldr	x2, mair
	msr	mair_el1, x2

	/*
	 * Set up TCR according to the PARange and ASIDBits fields
	 * from ID_AA64MMFR0_EL1 and the HAFDBS field from
	 * ID_AA64MMFR1_EL1. More precisely, set TCR_EL1.AS
	 * to 1 only if the ASIDBits field equals 0b0010.
	 */
	ldr	x2, tcr
	mrs	x3, id_aa64mmfr0_el1

	/* Copy the bottom 3 bits from id_aa64mmfr0_el1 into TCR.IPS */
	bfi	x2, x3, #(TCR_IPS_SHIFT), #(TCR_IPS_WIDTH)
	and	x3, x3, #(ID_AA64MMFR0_ASIDBits_MASK)

	/* Check if the HW supports 16 bit ASIDS */
	cmp	x3, #(ID_AA64MMFR0_ASIDBits_16)
	/* If so x3 == 1, else x3 == 0 */
	cset	x3, eq
	/* Set TCR.AS with x3 */
	bfi	x2, x3, #(TCR_ASID_SHIFT), #(TCR_ASID_WIDTH)

	/*
	 * Check if the HW supports access flag and dirty state updates,
	 * and set TCR_EL1.HA and TCR_EL1.HD accordingly.
	 */
	mrs	x3, id_aa64mmfr1_el1
	and	x3, x3, #(ID_AA64MMFR1_HAFDBS_MASK)
	cmp	x3, #1
	b.ne	1f
	orr	x2, x2, #(TCR_HA)
	b	2f
1:
	cmp	x3, #2
	b.ne	2f
	orr	x2, x2, #(TCR_HA | TCR_HD)
2:
	msr	tcr_el1, x2
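	/*
	 * Worked example for the TCR value just written: with
	 * VIRT_BITS == 48, TCR_TxSZ(64 - VIRT_BITS) sets T0SZ = T1SZ = 16,
	 * i.e. 48-bit address regions for both TTBR0 and TTBR1. The bfi
	 * above copies ID_AA64MMFR0_EL1.PARange into TCR.IPS, and TCR.AS
	 * is set to 1 (16-bit ASIDs) only when ASIDBits reads as 0b0010.
	 */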
	/*
	 * Set up SCTLR.
	 */
	ldr	x2, sctlr_set
	ldr	x3, sctlr_clear
	mrs	x1, sctlr_el1
	bic	x1, x1, x3	/* Clear the required bits */
	orr	x1, x1, x2	/* Set the required bits */
	msr	sctlr_el1, x1
	isb

	ret

	.align 3
mair:
	.quad	MAIR_ATTR(MAIR_DEVICE_nGnRnE, VM_MEMATTR_DEVICE_nGnRnE) | \
		MAIR_ATTR(MAIR_NORMAL_NC, VM_MEMATTR_UNCACHEABLE) | \
		MAIR_ATTR(MAIR_NORMAL_WB, VM_MEMATTR_WRITE_BACK) | \
		MAIR_ATTR(MAIR_NORMAL_WT, VM_MEMATTR_WRITE_THROUGH) | \
		MAIR_ATTR(MAIR_DEVICE_nGnRE, VM_MEMATTR_DEVICE_nGnRE)
tcr:
#if PAGE_SIZE == PAGE_SIZE_4K
#define	TCR_TG	(TCR_TG1_4K | TCR_TG0_4K)
#elif PAGE_SIZE == PAGE_SIZE_16K
#define	TCR_TG	(TCR_TG1_16K | TCR_TG0_16K)
#else
#error Unsupported page size
#endif

	.quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG | \
	    TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
sctlr_set:
	/* Bits to set */
	.quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN | \
	    SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \
	    SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C | \
	    SCTLR_M | SCTLR_CP15BEN)
sctlr_clear:
	/* Bits to clear */
	.quad (SCTLR_EE | SCTLR_E0E | SCTLR_IESB | SCTLR_WXN | SCTLR_UMA | \
	    SCTLR_ITD | SCTLR_A)
LEND(start_mmu)

ENTRY(abort)
	b abort
END(abort)

	.section .init_pagetable, "aw", %nobits
	.align PAGE_SHIFT
	/*
	 * 7 initial tables (in the following order), preceded by the
	 * kernel L3 tables when PAGE_SIZE != PAGE_SIZE_4K:
	 *  L2 for kernel (High addresses)
	 *  L1 for kernel
	 *  L0 for kernel (TTBR1)
	 *  L2 bootstrap for user (Low addresses)
	 *  L1 bootstrap for user
	 *  L0 bootstrap for user
	 *  L0 for user (TTBR0)
	 */
	.globl pagetable_l0_ttbr1
pagetable:
#if PAGE_SIZE != PAGE_SIZE_4K
	.space	(PAGE_SIZE * L3_PAGE_COUNT)
pagetable_l2_ttbr1:
#endif
	.space	PAGE_SIZE
pagetable_l1_ttbr1:
	.space	PAGE_SIZE
pagetable_l0_ttbr1:
	.space	PAGE_SIZE
pagetable_l2_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l1_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l0_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l0_ttbr0:
	.space	PAGE_SIZE

pagetable_end:

el2_pagetable:
	.space	PAGE_SIZE

	.align	4
initstack:
	.space	(PAGE_SIZE * KSTACK_PAGES)
initstack_end:


.text
EENTRY(aarch32_sigcode)
	.word 0xe1a0000d	// mov r0, sp
	.word 0xe2800040	// add r0, r0, #SIGF_UC
	.word 0xe59f700c	// ldr r7, [pc, #12]
	.word 0xef000000	// swi #0
	.word 0xe59f7008	// ldr r7, [pc, #8]
	.word 0xef000000	// swi #0
	.word 0xeafffffa	// b . - 16
EEND(aarch32_sigcode)
	/* Literal pool read by the two ldr instructions above */
	.word SYS_sigreturn
	.word SYS_exit
	.align	3
aarch32_esigcode:
	.data
	.global sz_aarch32_sigcode
sz_aarch32_sigcode:
	.quad	aarch32_esigcode - aarch32_sigcode