/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/pgtable.h>

#include <asm/asm_pointer_auth.h>
#include <asm/assembler.h>
#include <asm/boot.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/image.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/scs.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/virt.h>

#include "efi-header.S"

#define __PHYS_OFFSET	(KERNEL_START - TEXT_OFFSET)

#if (TEXT_OFFSET & 0xfff) != 0
#error TEXT_OFFSET must be at least 4KB aligned
#elif (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#elif TEXT_OFFSET > 0x1fffff
#error TEXT_OFFSET must be less than 2MB
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * This code is mostly position independent so you call this at
 * __pa(PAGE_OFFSET + TEXT_OFFSET).
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD
_head:
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
#ifdef CONFIG_EFI
	/*
	 * This add instruction has no meaningful effect except that
	 * its opcode forms the magic "MZ" signature required by UEFI.
	 */
	add	x13, x18, #0x16
	b	primary_entry
#else
	b	primary_entry			// branch to kernel start, magic
	.long	0				// reserved
#endif
	le64sym	_kernel_offset_le		// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.ascii	ARM64_IMAGE_MAGIC		// Magic number
#ifdef CONFIG_EFI
	.long	pe_header - _head		// Offset to the PE header.

pe_header:
	__EFI_PE_HEADER
#else
	.long	0				// reserved
#endif
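
/*
 * For reference: the "add x13, x18, #0x16" instruction in the EFI header
 * above assembles to 0x91005a4d. Stored little-endian, its first two bytes
 * are 0x4d 0x5a, i.e. ASCII "MZ", the DOS/PE signature that UEFI firmware
 * expects at offset 0 of the image.
 */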

	__INIT

	/*
	 * The following callee saved general purpose registers are used on the
	 * primary lowlevel boot path:
	 *
	 *  Register   Scope                                    Purpose
	 *  x21        primary_entry() .. start_kernel()        FDT pointer passed at boot in x0
	 *  x23        primary_entry() .. start_kernel()        physical misalignment/KASLR offset
	 *  x28        __create_page_tables()                   callee preserved temp register
	 *  x19/x20    __primary_switch()                       callee preserved temp registers
	 *  x24        __primary_switch() .. relocate_kernel()  current RELR displacement
	 */
SYM_CODE_START(primary_entry)
	bl	preserve_boot_args
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	adrp	x23, __PHYS_OFFSET
	and	x23, x23, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
	bl	set_cpu_boot_mode_flag
	bl	__create_page_tables
	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	bl	__cpu_setup			// initialise processor
	b	__primary_switch
SYM_CODE_END(primary_entry)

/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
SYM_CODE_START_LOCAL(preserve_boot_args)
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	dmb	sy				// needed before dc ivac with
						// MMU off

	mov	x1, #0x20			// 4 x 8 bytes
	b	__inval_dcache_area		// tail call
SYM_CODE_END(preserve_boot_args)

/*
 * Macro to create a table entry to the next page.
 *
 *	tbl:	page table address
 *	virt:	virtual address
 *	shift:	#imm page table shift
 *	ptrs:	#imm pointers per table page
 *
 * Preserves:	virt
 * Corrupts:	ptrs, tmp1, tmp2
 * Returns:	tbl -> next level table page address
 */
	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
	add	\tmp1, \tbl, #PAGE_SIZE
	phys_to_pte \tmp2, \tmp1
	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
	lsr	\tmp1, \virt, #\shift
	sub	\ptrs, \ptrs, #1
	and	\tmp1, \tmp1, \ptrs		// table index
	str	\tmp2, [\tbl, \tmp1, lsl #3]
	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
	.endm

/*
 * Macro to populate page table entries, these entries can be pointers to the next level
 * or last level entries pointing to physical memory.
 *
 *	tbl:	page table address
 *	rtbl:	pointer to page table or physical memory
 *	index:	start index to write
 *	eindex:	end index to write - [index, eindex] written to
 *	flags:	flags for pagetable entry to or in
 *	inc:	increment to rtbl between each entry
 *	tmp1:	temporary variable
 *
 * Preserves:	tbl, eindex, flags, inc
 * Corrupts:	index, tmp1
 * Returns:	rtbl
 */
	.macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
.Lpe\@:	phys_to_pte \tmp1, \rtbl
	orr	\tmp1, \tmp1, \flags		// tmp1 = table entry
	str	\tmp1, [\tbl, \index, lsl #3]
	add	\rtbl, \rtbl, \inc		// rtbl = pa next level
	add	\index, \index, #1
	cmp	\index, \eindex
	b.ls	.Lpe\@
	.endm

/*
 * Compute indices of table entries from virtual address range. If multiple entries
 * were needed in the previous page table level then the next page table level is assumed
 * to be composed of multiple pages. (This effectively scales the end index).
 *
 *	vstart:	virtual address of start of range
 *	vend:	virtual address of end of range
 *	shift:	shift used to transform virtual address into index
 *	ptrs:	number of entries in page table
 *	istart:	index in table corresponding to vstart
 *	iend:	index in table corresponding to vend
 *	count:	On entry: how many extra entries were required in previous level, scales
 *		  our end index.
 *		On exit: returns how many extra entries required for next page table level
 *
 * Preserves:	vstart, vend, shift, ptrs
 * Returns:	istart, iend, count
 */
	.macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count
	lsr	\iend, \vend, \shift
	mov	\istart, \ptrs
	sub	\istart, \istart, #1
	and	\iend, \iend, \istart	// iend = (vend >> shift) & (ptrs - 1)
	mov	\istart, \ptrs
	mul	\istart, \istart, \count
	add	\iend, \iend, \istart	// iend += (count - 1) * ptrs
					// our entries span multiple tables

	lsr	\istart, \vstart, \shift
	mov	\count, \ptrs
	sub	\count, \count, #1
	and	\istart, \istart, \count

	sub	\count, \iend, \istart
	.endm
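
/*
 * Worked example (illustrative only, assuming 4K pages, so ptrs == 512):
 * if the previous level needed one extra entry, \count is 1 on entry, so
 * \iend is offset by 512 and the [\istart, \iend] range spills into the
 * second of the two consecutive table pages that map_memory below assumes
 * were allocated for this level. On exit, \count == \iend - \istart, i.e.
 * the number of extra entries this level needed, which in turn scales the
 * indices computed for the next level down.
 */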

/*
 * Map memory for specified virtual address range. Each level of page table needed supports
 * multiple entries. If a level requires n entries the next page table level is assumed to be
 * formed from n pages.
 *
 *	tbl:	location of page table
 *	rtbl:	address to be used for first level page table entry (typically tbl + PAGE_SIZE)
 *	vstart:	start address to map
 *	vend:	end address to map - we map [vstart, vend]
 *	flags:	flags to use to map last level entries
 *	phys:	physical address corresponding to vstart - physical memory is contiguous
 *	pgds:	the number of pgd entries
 *
 * Temporaries:	istart, iend, tmp, count, sv - these need to be different registers
 * Preserves:	vstart, vend, flags
 * Corrupts:	tbl, rtbl, istart, iend, tmp, count, sv
 */
	.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
	add \rtbl, \tbl, #PAGE_SIZE
	mov \sv, \rtbl
	mov \count, #0
	compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
	mov \sv, \rtbl

#if SWAPPER_PGTABLE_LEVELS > 3
	compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
	mov \sv, \rtbl
#endif

#if SWAPPER_PGTABLE_LEVELS > 2
	compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
#endif

	compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count
	bic \count, \phys, #SWAPPER_BLOCK_SIZE - 1
	populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
	.endm

/*
 * Setup the initial page tables. We only setup the barest amount which is
 * required to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
 *     been enabled
 */
SYM_FUNC_START_LOCAL(__create_page_tables)
	mov	x28, lr

	/*
	 * Invalidate the init page tables to avoid potential dirty cache lines
	 * being evicted. Other page tables are allocated in rodata as part of
	 * the kernel image, and thus are clean to the PoC per the boot
	 * protocol.
	 */
	adrp	x0, init_pg_dir
	adrp	x1, init_pg_end
	sub	x1, x1, x0
	bl	__inval_dcache_area

	/*
	 * Clear the init page tables.
	 */
	adrp	x0, init_pg_dir
	adrp	x1, init_pg_end
	sub	x1, x1, x0
1:	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	subs	x1, x1, #64
	b.ne	1b

	mov	x7, SWAPPER_MM_MMUFLAGS

	/*
	 * Create the identity mapping.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)

#ifdef CONFIG_ARM64_VA_BITS_52
	mrs_s	x6, SYS_ID_AA64MMFR2_EL1
	and	x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
	mov	x5, #52
	cbnz	x6, 1f
#endif
	mov	x5, #VA_BITS_MIN
1:
	adr_l	x6, vabits_actual
	str	x5, [x6]
	dmb	sy
	dc	ivac, x6		// Invalidate potentially stale cache line

	/*
	 * VA_BITS may be too small to allow for an ID mapping to be created
	 * that covers system RAM if that is located sufficiently high in the
	 * physical address space. So for the ID map, use an extended virtual
	 * range in that case, and configure an additional translation level
	 * if needed.
	 *
	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
	 * this number conveniently equals the number of leading zeroes in
	 * the physical address of __idmap_text_end.
	 */
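	/*
	 * For example: with 39-bit VAs the default T0SZ is 25. If the
	 * physical address of __idmap_text_end has bit 40 set (i.e. it lies
	 * above 1 TiB), clz yields 23, so T0SZ must drop to 23 and the
	 * extended VA range below is used, which adds one translation level
	 * on top of the usual hierarchy.
	 */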
	adrp	x5, __idmap_text_end
	clz	x5, x5
	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
	b.ge	1f			// .. then skip VA range extension

	adr_l	x6, idmap_t0sz
	str	x5, [x6]
	dmb	sy
	dc	ivac, x6		// Invalidate potentially stale cache line

#if (VA_BITS < 48)
#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
#define EXTRA_PTRS	(1 << (PHYS_MASK_SHIFT - EXTRA_SHIFT))

	/*
	 * If VA_BITS < 48, we have to configure an additional table level.
	 * First, we have to verify our assumption that the current value of
	 * VA_BITS was chosen such that all translation levels are fully
	 * utilised, and that lowering T0SZ will always result in an additional
	 * translation level to be configured.
	 */
#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif

	mov	x4, EXTRA_PTRS
	create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
#else
	/*
	 * If VA_BITS == 48, we don't have to configure an additional
	 * translation level, but the top-level table has more entries.
	 */
	mov	x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)
	str_l	x4, idmap_ptrs_per_pgd, x5
#endif
1:
	ldr_l	x4, idmap_ptrs_per_pgd
	mov	x5, x3				// __pa(__idmap_text_start)
	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)

	map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14

	/*
	 * Map the kernel image (starting with PHYS_OFFSET).
	 */
	adrp	x0, init_pg_dir
	mov_q	x5, KIMAGE_VADDR + TEXT_OFFSET	// compile time __va(_text)
	add	x5, x5, x23			// add KASLR displacement
	mov	x4, PTRS_PER_PGD
	adrp	x6, _end			// runtime __pa(_end)
	adrp	x3, _text			// runtime __pa(_text)
	sub	x6, x6, x3			// _end - _text
	add	x6, x6, x5			// runtime __va(_end)

	map_memory x0, x1, x5, x6, x7, x3, x4, x10, x11, x12, x13, x14

	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate those tables again to
	 * remove any speculatively loaded cache lines.
	 */
	dmb	sy

	adrp	x0, idmap_pg_dir
	adrp	x1, idmap_pg_end
	sub	x1, x1, x0
	bl	__inval_dcache_area

	adrp	x0, init_pg_dir
	adrp	x1, init_pg_end
	sub	x1, x1, x0
	bl	__inval_dcache_area

	ret	x28
SYM_FUNC_END(__create_page_tables)

/*
 * The following fragment of code is executed with the MMU enabled.
 *
 *   x0 = __PHYS_OFFSET
 */
SYM_FUNC_START_LOCAL(__primary_switched)
	adrp	x4, init_thread_union
	add	sp, x4, #THREAD_SIZE
	adr_l	x5, init_task
	msr	sp_el0, x5			// Save thread_info

#ifdef CONFIG_ARM64_PTR_AUTH
	__ptrauth_keys_init_cpu	x5, x6, x7, x8
#endif

	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	stp	xzr, x30, [sp, #-16]!
	mov	x29, sp

#ifdef CONFIG_SHADOW_CALL_STACK
	adr_l	scs_sp, init_shadow_call_stack	// Set shadow call stack
#endif

	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	ldr_l	x4, kimage_vaddr		// Save the offset between
	sub	x4, x4, x0			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	// Clear BSS
	adr_l	x0, __bss_start
	mov	x1, xzr
	adr_l	x2, __bss_stop
	sub	x2, x2, x0
	bl	__pi_memset
	dsb	ishst				// Make zero page visible to PTW

#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
#ifdef CONFIG_RANDOMIZE_BASE
	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
	b.ne	0f
	mov	x0, x21				// pass FDT address in x0
	bl	kaslr_early_init		// parse FDT for KASLR options
	cbz	x0, 0f				// KASLR disabled? just proceed
	orr	x23, x23, x0			// record KASLR offset
	ldp	x29, x30, [sp], #16		// we must enable KASLR, return
	ret					// to __primary_switch()
0:
#endif
	add	sp, sp, #16
	mov	x29, #0
	mov	x30, #0
	b	start_kernel
SYM_FUNC_END(__primary_switched)
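
/*
 * Rough sketch of the CONFIG_RANDOMIZE_BASE boot flow implemented above and
 * in __primary_switch below: the kernel is first mapped and entered at its
 * default (possibly misaligned) virtual address; if kaslr_early_init()
 * returns a non-zero offset, __primary_switched returns to __primary_switch,
 * which disables the MMU, rebuilds the kernel mapping at the randomized
 * address recorded in x23, re-enables the MMU and applies the relocations a
 * second time before branching back here.
 */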

	.pushsection ".rodata", "a"
SYM_DATA_START(kimage_vaddr)
	.quad		_text - TEXT_OFFSET
SYM_DATA_END(kimage_vaddr)
EXPORT_SYMBOL(kimage_vaddr)
	.popsection

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".idmap.text","awx"

/*
 * If we're fortunate enough to boot at EL2, ensure that the world is
 * sane before dropping to EL1.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
 * booted in EL1 or EL2 respectively.
 */
SYM_FUNC_START(el2_setup)
	msr	SPsel, #1			// We want to use SP_EL{1,2}
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.eq	1f
	mov_q	x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
	msr	sctlr_el1, x0
	mov	w0, #BOOT_CPU_MODE_EL1		// This cpu booted in EL1
	isb
	ret

1:	mov_q	x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
	msr	sctlr_el2, x0

#ifdef CONFIG_ARM64_VHE
	/*
	 * Check for VHE being present. For the rest of the EL2 setup,
	 * x2 being non-zero indicates that we do have VHE, and that the
	 * kernel is intended to run at EL2.
	 */
	mrs	x2, id_aa64mmfr1_el1
	ubfx	x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4
#else
	mov	x2, xzr
#endif

	/* Hyp configuration. */
	mov_q	x0, HCR_HOST_NVHE_FLAGS
	cbz	x2, set_hcr
	mov_q	x0, HCR_HOST_VHE_FLAGS
set_hcr:
	msr	hcr_el2, x0
	isb

	/*
	 * Allow Non-secure EL1 and EL0 to access physical timer and counter.
	 * This is not necessary for VHE, since the host kernel runs in EL2,
	 * and EL0 accesses are configured in the later stage of boot process.
	 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
	 * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
	 * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1
	 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
	 * EL2.
	 */
	cbnz	x2, 1f
	mrs	x0, cnthctl_el2
	orr	x0, x0, #3			// Enable EL1 physical timers
	msr	cnthctl_el2, x0
1:
	msr	cntvoff_el2, xzr		// Clear virtual offset

#ifdef CONFIG_ARM_GIC_V3
	/* GICv3 system register access */
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
	cbz	x0, 3f

	mrs_s	x0, SYS_ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	SYS_ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, SYS_ICC_SRE_EL2		// Read SRE back,
	tbz	x0, #0, 3f			// and check that it sticks
	msr_s	SYS_ICH_HCR_EL2, xzr		// Reset ICH_HCR_EL2 to defaults

3:
#endif

	/* Populate ID registers. */
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1

#ifdef CONFIG_COMPAT
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
#endif

	/* EL2 debug */
	mrs	x1, id_aa64dfr0_el1
	sbfx	x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
	cmp	x0, #1
	b.lt	4f				// Skip if no PMU present
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
4:
	csel	x3, xzr, x0, lt			// all PMU counters from EL1

	/* Statistical profiling */
	ubfx	x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
	cbz	x0, 7f				// Skip if SPE not present
	cbnz	x2, 6f				// VHE?
	mrs_s	x4, SYS_PMBIDR_EL1		// If SPE available at EL2,
	and	x4, x4, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
	cbnz	x4, 5f				// then permit sampling of physical
	mov	x4, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
		      1 << SYS_PMSCR_EL2_PA_SHIFT)
	msr_s	SYS_PMSCR_EL2, x4		// addresses and physical counter
5:
	mov	x1, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
	orr	x3, x3, x1			// If we don't have VHE, then
	b	7f				// use EL1&0 translation.
6:						// For VHE, use EL2 translation
	orr	x3, x3, #MDCR_EL2_TPMS		// and disable access from EL1
7:
	msr	mdcr_el2, x3			// Configure debug traps

	/* LORegions */
	mrs	x1, id_aa64mmfr1_el1
	ubfx	x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
	cbz	x0, 1f
	msr_s	SYS_LORC_EL1, xzr
1:

	/* Stage-2 translation */
	msr	vttbr_el2, xzr

	cbz	x2, install_el2_stub

	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	isb
	ret

SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
	/*
	 * When VHE is not in use, early init of EL2 and EL1 needs to be
	 * done here.
	 * When VHE _is_ in use, EL1 will not be used in the host and
	 * requires no configuration, and all non-hyp-specific EL2 setup
	 * will be done via the _EL1 system register aliases in __cpu_setup.
	 */
	mov_q	x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
	msr	sctlr_el1, x0

	/* Coprocessor traps. */
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2

	/* SVE register access */
	mrs	x1, id_aa64pfr0_el1
	ubfx	x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
	cbz	x1, 7f

	bic	x0, x0, #CPTR_EL2_TZ		// Also disable SVE traps
	msr	cptr_el2, x0			// Disable copro. traps to EL2
	isb
	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
	msr_s	SYS_ZCR_EL2, x1			// length for EL1.

	/* Hypervisor stub */
7:	adr_l	x0, __hyp_stub_vectors
	msr	vbar_el2, x0

	/* spsr */
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, x0
	msr	elr_el2, lr
	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	eret
SYM_FUNC_END(el2_setup)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w0. See arch/arm64/include/asm/virt.h for more info.
 */
SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
	adr_l	x1, __boot_cpu_mode
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w0, [x1]			// Save CPU boot mode
	dmb	sy
	dc	ivac, x1			// Invalidate potentially stale cache line
	ret
SYM_FUNC_END(set_cpu_boot_mode_flag)

/*
 * These values are written with the MMU off, but read with the MMU on.
 * Writers will invalidate the corresponding address, discarding up to a
 * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures
 * sufficient alignment that the CWG doesn't overlap another section.
 */
	.pushsection ".mmuoff.data.write", "aw"
/*
 * We need to find out the CPU boot mode long after boot, so we need to
 * store it in a writable variable.
 *
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
SYM_DATA_START(__boot_cpu_mode)
	.long	BOOT_CPU_MODE_EL2
	.long	BOOT_CPU_MODE_EL1
SYM_DATA_END(__boot_cpu_mode)
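/*
 * Note on the layout above: CPUs that entered the kernel at EL1 overwrite
 * the first word with BOOT_CPU_MODE_EL1, while CPUs that entered at EL2
 * overwrite the second word with BOOT_CPU_MODE_EL2. Only when every CPU
 * booted in the same mode do the two words end up equal, which is what
 * is_hyp_mode_available()/is_hyp_mode_mismatched() in asm/virt.h check.
 */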
/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 */
SYM_DATA_START(__early_cpu_boot_status)
	.quad	0
SYM_DATA_END(__early_cpu_boot_status)

	.popsection

	/*
	 * This provides a "holding pen" for platforms, where all secondary
	 * cores are held until we're ready for them to initialise.
	 */
SYM_FUNC_START(secondary_holding_pen)
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	bl	set_cpu_boot_mode_flag
	mrs	x0, mpidr_el1
	mov_q	x1, MPIDR_HWID_BITMASK
	and	x0, x0, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x0
	b.eq	secondary_startup
	wfe
	b	pen
SYM_FUNC_END(secondary_holding_pen)

	/*
	 * Secondary entry point that jumps straight into the kernel. Only to
	 * be used where CPUs are brought online dynamically by the kernel.
	 */
SYM_FUNC_START(secondary_entry)
	bl	el2_setup			// Drop to EL1
	bl	set_cpu_boot_mode_flag
	b	secondary_startup
SYM_FUNC_END(secondary_entry)

SYM_FUNC_START_LOCAL(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 */
	bl	__cpu_secondary_check52bitva
	bl	__cpu_setup			// initialise processor
	adrp	x1, swapper_pg_dir
	bl	__enable_mmu
	ldr	x8, =__secondary_switched
	br	x8
SYM_FUNC_END(secondary_startup)

SYM_FUNC_START_LOCAL(__secondary_switched)
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	adr_l	x0, secondary_data
	ldr	x1, [x0, #CPU_BOOT_STACK]	// get secondary_data.stack
	cbz	x1, __secondary_too_slow
	mov	sp, x1
	ldr	x2, [x0, #CPU_BOOT_TASK]
	cbz	x2, __secondary_too_slow
	msr	sp_el0, x2
	scs_load x2, x3
	mov	x29, #0
	mov	x30, #0

#ifdef CONFIG_ARM64_PTR_AUTH
	ptrauth_keys_init_cpu x2, x3, x4, x5
#endif

	b	secondary_start_kernel
SYM_FUNC_END(__secondary_switched)

SYM_FUNC_START_LOCAL(__secondary_too_slow)
	wfe
	wfi
	b	__secondary_too_slow
SYM_FUNC_END(__secondary_too_slow)

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 *
 * update_early_cpu_boot_status status, tmp1, tmp2
 *  - Corrupts tmp1, tmp2
 *  - Writes 'status' to __early_cpu_boot_status and makes sure
 *    it is committed to memory.
 */

	.macro	update_early_cpu_boot_status status, tmp1, tmp2
	mov	\tmp2, #\status
	adr_l	\tmp1, __early_cpu_boot_status
	str	\tmp2, [\tmp1]
	dmb	sy
	dc	ivac, \tmp1			// Invalidate potentially stale cache line
	.endm

/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *  x1  = TTBR1_EL1 value
 *
 * Returns to the caller via x30/lr. This requires the caller to be covered
 * by the .idmap.text section.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU
 */
SYM_FUNC_START(__enable_mmu)
	mrs	x2, ID_AA64MMFR0_EL1
	ubfx	x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
	b.ne	__no_granule_support
	update_early_cpu_boot_status 0, x2, x3
	adrp	x2, idmap_pg_dir
	phys_to_ttbr x1, x1
	phys_to_ttbr x2, x2
	msr	ttbr0_el1, x2			// load TTBR0
	offset_ttbr1 x1, x3
	msr	ttbr1_el1, x1			// load TTBR1
	isb
	msr	sctlr_el1, x0
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
	ret
SYM_FUNC_END(__enable_mmu)

SYM_FUNC_START(__cpu_secondary_check52bitva)
#ifdef CONFIG_ARM64_VA_BITS_52
	ldr_l	x0, vabits_actual
	cmp	x0, #52
	b.ne	2f

	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
	and	x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
	cbnz	x0, 2f

	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
1:	wfe
	wfi
	b	1b

#endif
2:	ret
SYM_FUNC_END(__cpu_secondary_check52bitva)

SYM_FUNC_START_LOCAL(__no_granule_support)
	/* Indicate that this CPU can't boot and is stuck in the kernel */
	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
1:
	wfe
	wfi
	b	1b
SYM_FUNC_END(__no_granule_support)

#ifdef CONFIG_RELOCATABLE
SYM_FUNC_START_LOCAL(__relocate_kernel)
	/*
	 * Iterate over each entry in the relocation table, and apply the
	 * relocations in place.
	 */
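	/*
	 * Note: each entry here is a 24-byte Elf64_Rela { r_offset, r_info,
	 * r_addend }. For R_AARCH64_RELATIVE the required fixup is
	 *   *(u64 *)(r_offset + x23) = r_addend + x23
	 * where x23 is the kernel's displacement from its link-time virtual
	 * address, which is what the loop below performs.
	 */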
	ldr	w9, =__rela_offset		// offset to reloc table
	ldr	w10, =__rela_size		// size of reloc table

	mov_q	x11, KIMAGE_VADDR		// default virtual offset
	add	x11, x11, x23			// actual virtual offset
	add	x9, x9, x11			// __va(.rela)
	add	x10, x9, x10			// __va(.rela) + sizeof(.rela)

0:	cmp	x9, x10
	b.hs	1f
	ldp	x12, x13, [x9], #24
	ldr	x14, [x9, #-8]
	cmp	w13, #R_AARCH64_RELATIVE
	b.ne	0b
	add	x14, x14, x23			// relocate
	str	x14, [x12, x23]
	b	0b

1:
#ifdef CONFIG_RELR
	/*
	 * Apply RELR relocations.
	 *
	 * RELR is a compressed format for storing relative relocations. The
	 * encoded sequence of entries looks like:
	 * [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBB1 ... ]
	 *
	 * i.e. start with an address, followed by any number of bitmaps. The
	 * address entry encodes 1 relocation. The subsequent bitmap entries
	 * encode up to 63 relocations each, at subsequent offsets following
	 * the last address entry.
	 *
	 * The bitmap entries must have 1 in the least significant bit. The
	 * assumption here is that an address cannot have 1 in lsb. Odd
	 * addresses are not supported. Any odd addresses are stored in the RELA
	 * section, which is handled above.
	 *
	 * Excluding the least significant bit in the bitmap, each non-zero
	 * bit in the bitmap represents a relocation to be applied to
	 * a corresponding machine word that follows the base address
	 * word. The second least significant bit represents the machine
	 * word immediately following the initial address, and each bit
	 * that follows represents the next word, in linear order. As such,
	 * a single bitmap can encode up to 63 relocations in a 64-bit object.
	 *
	 * In this implementation we store the address of the next RELR table
	 * entry in x9, the address being relocated by the current address or
	 * bitmap entry in x13 and the address being relocated by the current
	 * bit in x14.
	 *
	 * Because addends are stored in place in the binary, RELR relocations
	 * cannot be applied idempotently. We use x24 to keep track of the
	 * currently applied displacement so that we can correctly relocate if
	 * __relocate_kernel is called twice with non-zero displacements (i.e.
	 * if there is both a physical misalignment and a KASLR displacement).
	 */
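	/*
	 * Worked example (illustrative): the two-entry sequence { A, 0xb }
	 * (bitmap bits 0, 1 and 3 set) relocates the word at A via the
	 * address entry, and then, because bits 1 and 3 of the bitmap are
	 * set, the words at A + 8 and A + 24.
	 */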
	ldr	w9, =__relr_offset		// offset to reloc table
	ldr	w10, =__relr_size		// size of reloc table
	add	x9, x9, x11			// __va(.relr)
	add	x10, x9, x10			// __va(.relr) + sizeof(.relr)

	sub	x15, x23, x24			// delta from previous offset
	cbz	x15, 7f				// nothing to do if unchanged
	mov	x24, x23			// save new offset

2:	cmp	x9, x10
	b.hs	7f
	ldr	x11, [x9], #8
	tbnz	x11, #0, 3f			// branch to handle bitmaps
	add	x13, x11, x23
	ldr	x12, [x13]			// relocate address entry
	add	x12, x12, x15
	str	x12, [x13], #8			// adjust to start of bitmap
	b	2b

3:	mov	x14, x13
4:	lsr	x11, x11, #1
	cbz	x11, 6f
	tbz	x11, #0, 5f			// skip bit if not set
	ldr	x12, [x14]			// relocate bit
	add	x12, x12, x15
	str	x12, [x14]

5:	add	x14, x14, #8			// move to next bit's address
	b	4b

6:	/*
	 * Move to the next bitmap's address. 8 is the word size, and 63 is the
	 * number of significant bits in a bitmap entry.
	 */
	add	x13, x13, #(8 * 63)
	b	2b

7:
#endif
	ret

SYM_FUNC_END(__relocate_kernel)
#endif

SYM_FUNC_START_LOCAL(__primary_switch)
#ifdef CONFIG_RANDOMIZE_BASE
	mov	x19, x0				// preserve new SCTLR_EL1 value
	mrs	x20, sctlr_el1			// preserve old SCTLR_EL1 value
#endif

	adrp	x1, init_pg_dir
	bl	__enable_mmu
#ifdef CONFIG_RELOCATABLE
#ifdef CONFIG_RELR
	mov	x24, #0				// no RELR displacement yet
#endif
	bl	__relocate_kernel
#ifdef CONFIG_RANDOMIZE_BASE
	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET
	blr	x8

	/*
	 * If we return here, we have a KASLR displacement in x23 which we need
	 * to take into account by discarding the current kernel mapping and
	 * creating a new one.
	 */
	pre_disable_mmu_workaround
	msr	sctlr_el1, x20			// disable the MMU
	isb
	bl	__create_page_tables		// recreate kernel mapping

	tlbi	vmalle1				// Remove any stale TLB entries
	dsb	nsh

	msr	sctlr_el1, x19			// re-enable the MMU
	isb
	ic	iallu				// flush instructions fetched
	dsb	nsh				// via old mapping
	isb

	bl	__relocate_kernel
#endif
#endif
	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET
	br	x8
SYM_FUNC_END(__primary_switch)