/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

#include <asm/asm_pointer_auth.h>
#include <asm/assembler.h>
#include <asm/boot.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/el2_setup.h>
#include <asm/elf.h>
#include <asm/image.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/scs.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/virt.h>

#include "efi-header.S"

#if (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
	efi_signature_nop			// special NOP to identify as PE/COFF executable
	b	primary_entry			// branch to kernel start, magic
	.quad	0				// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.ascii	ARM64_IMAGE_MAGIC		// Magic number
	.long	.Lpe_header_offset		// Offset to the PE header.

	__EFI_PE_HEADER

	.section ".idmap.text","a"

	/*
	 * The following callee saved general purpose registers are used on the
	 * primary lowlevel boot path:
	 *
	 *  Register   Scope                                  Purpose
	 *  x19        primary_entry() .. start_kernel()      whether we entered with the MMU on
	 *  x20        primary_entry() .. __primary_switch()  CPU boot mode
	 *  x21        primary_entry() .. start_kernel()      FDT pointer passed at boot in x0
	 */
SYM_CODE_START(primary_entry)
	bl	record_mmu_state
	bl	preserve_boot_args

	adrp	x1, early_init_stack
	mov	sp, x1
	mov	x29, xzr
	adrp	x0, init_idmap_pg_dir
	mov	x1, xzr
	bl	__pi_create_init_idmap

	/*
	 * If the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate those tables again to
	 * remove any speculatively loaded cache lines.
	 */
	cbnz	x19, 0f
	dmb	sy
	mov	x1, x0				// end of used region
	adrp	x0, init_idmap_pg_dir
	adr_l	x2, dcache_inval_poc
	blr	x2
	b	1f

	/*
	 * If we entered with the MMU and caches on, clean the ID mapped part
	 * of the primary boot code to the PoC so we can safely execute it with
	 * the MMU off.
	 */
0:	adrp	x0, __idmap_text_start
	adr_l	x1, __idmap_text_end
	adr_l	x2, dcache_clean_poc
	blr	x2

1:	mov	x0, x19
	bl	init_kernel_el			// w0=cpu_boot_mode
	mov	x20, x0

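	/*
	 * Note: x20 now holds the BOOT_CPU_MODE_EL1/EL2 value returned by
	 * init_kernel_el(); when entered at EL2, its upper 32 bits may also
	 * carry context flags such as BOOT_CPU_FLAG_E2H (see init_kernel_el
	 * below).
	 */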
	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	bl	__cpu_setup			// initialise processor
	b	__primary_switch
SYM_CODE_END(primary_entry)

	__INIT
SYM_CODE_START_LOCAL(record_mmu_state)
	mrs	x19, CurrentEL
	cmp	x19, #CurrentEL_EL2
	mrs	x19, sctlr_el1
	b.ne	0f
	mrs	x19, sctlr_el2
0:
CPU_LE( tbnz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
CPU_BE( tbz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
	tst	x19, #SCTLR_ELx_C		// Z := (C == 0)
	and	x19, x19, #SCTLR_ELx_M		// isolate M bit
	csel	x19, xzr, x19, eq		// clear x19 if Z
	ret

	/*
	 * Set the correct endianness early so all memory accesses issued
	 * before init_kernel_el() occur in the correct byte order. Note that
	 * this means the MMU must be disabled, or the active ID map will end
	 * up getting interpreted with the wrong byte order.
	 */
1:	eor	x19, x19, #SCTLR_ELx_EE
	bic	x19, x19, #SCTLR_ELx_M
	b.ne	2f
	pre_disable_mmu_workaround
	msr	sctlr_el2, x19
	b	3f
2:	pre_disable_mmu_workaround
	msr	sctlr_el1, x19
3:	isb
	mov	x19, xzr
	ret
SYM_CODE_END(record_mmu_state)

/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
SYM_CODE_START_LOCAL(preserve_boot_args)
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	cbnz	x19, 0f				// skip cache invalidation if MMU is on
	dmb	sy				// needed before dc ivac with
						// MMU off

	add	x1, x0, #0x20			// 4 x 8 bytes
	b	dcache_inval_poc		// tail call
0:	str_l	x19, mmu_enabled_at_boot, x0
	ret
SYM_CODE_END(preserve_boot_args)

	/*
	 * Initialize CPU registers with task-specific and cpu-specific context.
	 *
	 * Create a final frame record at task_pt_regs(current)->stackframe, so
	 * that the unwinder can identify the final frame record of any task by
	 * its location in the task stack. We reserve the entire pt_regs space
	 * for consistency with user tasks and kthreads.
	 */
	.macro	init_cpu_task tsk, tmp1, tmp2
	msr	sp_el0, \tsk

	ldr	\tmp1, [\tsk, #TSK_STACK]
	add	sp, \tmp1, #THREAD_SIZE
	sub	sp, sp, #PT_REGS_SIZE

	stp	xzr, xzr, [sp, #S_STACKFRAME]
	add	x29, sp, #S_STACKFRAME

	scs_load_current

	adr_l	\tmp1, __per_cpu_offset
	ldr	w\tmp2, [\tsk, #TSK_TI_CPU]
	ldr	\tmp1, [\tmp1, \tmp2, lsl #3]
	set_this_cpu_offset \tmp1
	.endm

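/*
 * Note: init_cpu_task is invoked from both __primary_switched and
 * __secondary_switched below, i.e. it runs once on every CPU that enters
 * the kernel through this file.
 */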
/*
 * The following fragment of code is executed with the MMU enabled.
 *
 *   x0 = __pa(KERNEL_START)
 */
SYM_FUNC_START_LOCAL(__primary_switched)
	adr_l	x4, init_task
	init_cpu_task x4, x5, x6

	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	stp	x29, x30, [sp, #-16]!
	mov	x29, sp

	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	adrp	x4, _text			// Save the offset between
	sub	x4, x4, x0			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	mov	x0, x20
	bl	set_cpu_boot_mode_flag

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	bl	kasan_early_init
#endif
	mov	x0, x20
	bl	finalise_el2			// Prefer VHE if possible
	ldp	x29, x30, [sp], #16
	bl	start_kernel
	ASM_BUG()
SYM_FUNC_END(__primary_switched)

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".idmap.text","a"

/*
 * Starting from EL2 or EL1, configure the CPU to execute at the highest
 * reachable EL supported by the kernel in a chosen default state. If dropping
 * from EL2 to EL1, configure EL2 before configuring EL1.
 *
 * Since we cannot always rely on ERET synchronizing writes to sysregs (e.g. if
 * SCTLR_ELx.EOS is clear), we place an ISB prior to ERET.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x0 if
 * booted in EL1 or EL2 respectively, with the top 32 bits containing
 * potential context flags. These flags are *not* stored in __boot_cpu_mode.
 *
 * x0: whether we are being called from the primary boot path with the MMU on
 */
SYM_FUNC_START(init_kernel_el)
	mrs	x1, CurrentEL
	cmp	x1, #CurrentEL_EL2
	b.eq	init_el2

SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el1, x0
	isb
	mov_q	x0, INIT_PSTATE_EL1
	msr	spsr_el1, x0
	msr	elr_el1, lr
	mov	w0, #BOOT_CPU_MODE_EL1
	eret

SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
	msr	elr_el2, lr

	// clean all HYP code to the PoC if we booted at EL2 with the MMU on
	cbz	x0, 0f
	adrp	x0, __hyp_idmap_text_start
	adr_l	x1, __hyp_text_end
	adr_l	x2, dcache_clean_poc
	blr	x2
0:
	mov_q	x0, HCR_HOST_NVHE_FLAGS
	msr	hcr_el2, x0
	isb

	init_el2_state

	/* Hypervisor stub */
	adr_l	x0, __hyp_stub_vectors
	msr	vbar_el2, x0
	isb

	mov_q	x1, INIT_SCTLR_EL1_MMU_OFF

	/*
	 * Compliant CPUs advertise their VHE-onlyness with
	 * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
	 * RES1 in that case.
	 *
	 * Fruity CPUs seem to have HCR_EL2.E2H set to RES1, but
	 * don't advertise it (they predate this relaxation).
	 */
	mrs_s	x0, SYS_ID_AA64MMFR4_EL1
	tbnz	x0, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f

	mrs	x0, hcr_el2
	and	x0, x0, #HCR_E2H
	cbz	x0, 2f
1:
	/* Set a sane SCTLR_EL1, the VHE way */
	pre_disable_mmu_workaround
	msr_s	SYS_SCTLR_EL12, x1
	mov	x2, #BOOT_CPU_FLAG_E2H
	b	3f

2:
	pre_disable_mmu_workaround
	msr	sctlr_el1, x1
	mov	x2, xzr
3:
	__init_el2_nvhe_prepare_eret

	mov	w0, #BOOT_CPU_MODE_EL2
	orr	x0, x0, x2
	eret
SYM_FUNC_END(init_kernel_el)

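/*
 * secondary_holding_pen below implements the parking loop used with the
 * spin-table boot method: each parked core spins until the boot CPU writes
 * its MPIDR hardware ID to secondary_holding_pen_release and sends an
 * event (see arch/arm64/kernel/smp_spin_table.c).
 */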
	/*
	 * This provides a "holding pen" for platforms, in which all secondary
	 * cores are held until we're ready for them to initialise.
	 */
SYM_FUNC_START(secondary_holding_pen)
	mov	x0, xzr
	bl	init_kernel_el			// w0=cpu_boot_mode
	mrs	x2, mpidr_el1
	mov_q	x1, MPIDR_HWID_BITMASK
	and	x2, x2, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x2
	b.eq	secondary_startup
	wfe
	b	pen
SYM_FUNC_END(secondary_holding_pen)

	/*
	 * Secondary entry point that jumps straight into the kernel. Only to
	 * be used where CPUs are brought online dynamically by the kernel.
	 */
SYM_FUNC_START(secondary_entry)
	mov	x0, xzr
	bl	init_kernel_el			// w0=cpu_boot_mode
	b	secondary_startup
SYM_FUNC_END(secondary_entry)

SYM_FUNC_START_LOCAL(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 */
	mov	x20, x0				// preserve boot mode

#ifdef CONFIG_ARM64_VA_BITS_52
alternative_if ARM64_HAS_VA52
	bl	__cpu_secondary_check52bitva
alternative_else_nop_endif
#endif

	bl	__cpu_setup			// initialise processor
	adrp	x1, swapper_pg_dir
	adrp	x2, idmap_pg_dir
	bl	__enable_mmu
	ldr	x8, =__secondary_switched
	br	x8
SYM_FUNC_END(secondary_startup)

	.text
SYM_FUNC_START_LOCAL(__secondary_switched)
	mov	x0, x20
	bl	set_cpu_boot_mode_flag

	mov	x0, x20
	bl	finalise_el2

	str_l	xzr, __early_cpu_boot_status, x3
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	adr_l	x0, secondary_data
	ldr	x2, [x0, #CPU_BOOT_TASK]
	cbz	x2, __secondary_too_slow

	init_cpu_task x2, x1, x3

#ifdef CONFIG_ARM64_PTR_AUTH
	ptrauth_keys_init_cpu x2, x3, x4, x5
#endif

	bl	secondary_start_kernel
	ASM_BUG()
SYM_FUNC_END(__secondary_switched)

SYM_FUNC_START_LOCAL(__secondary_too_slow)
	wfe
	wfi
	b	__secondary_too_slow
SYM_FUNC_END(__secondary_too_slow)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w0. See arch/arm64/include/asm/virt.h for more info.
 */
SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
	adr_l	x1, __boot_cpu_mode
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w0, [x1]			// Save CPU boot mode
	ret
SYM_FUNC_END(set_cpu_boot_mode_flag)

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 *
 * update_early_cpu_boot_status status, tmp1, tmp2
 *  - Corrupts tmp1, tmp2
 *  - Writes 'status' to __early_cpu_boot_status and makes sure
 *    it is committed to memory.
 */

	.macro	update_early_cpu_boot_status status, tmp1, tmp2
	mov	\tmp2, #\status
	adr_l	\tmp1, __early_cpu_boot_status
	str	\tmp2, [\tmp1]
	dmb	sy
	dc	ivac, \tmp1			// Invalidate potentially stale cache line
	.endm

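/*
 * The status values written via update_early_cpu_boot_status (e.g.
 * CPU_STUCK_IN_KERNEL together with a CPU_STUCK_REASON_* flag) are read
 * back by the CPU coordinating the bring-up; see __cpu_up() in
 * arch/arm64/kernel/smp.c.
 */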
/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *  x1  = TTBR1_EL1 value
 *  x2  = ID map root table address
 *
 * Returns to the caller via x30/lr. This requires the caller to be covered
 * by the .idmap.text section.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU.
 */
	.section ".idmap.text","a"
SYM_FUNC_START(__enable_mmu)
	mrs	x3, ID_AA64MMFR0_EL1
	ubfx	x3, x3, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
	cmp	x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN
	b.lt	__no_granule_support
	cmp	x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX
	b.gt	__no_granule_support
	phys_to_ttbr x2, x2
	msr	ttbr0_el1, x2			// load TTBR0
	load_ttbr1 x1, x1, x3

	set_sctlr_el1	x0

	ret
SYM_FUNC_END(__enable_mmu)

#ifdef CONFIG_ARM64_VA_BITS_52
SYM_FUNC_START(__cpu_secondary_check52bitva)
#ifndef CONFIG_ARM64_LPA2
	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
	and	x0, x0, ID_AA64MMFR2_EL1_VARange_MASK
	cbnz	x0, 2f
#else
	mrs	x0, id_aa64mmfr0_el1
	sbfx	x0, x0, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
	cmp	x0, #ID_AA64MMFR0_EL1_TGRAN_LPA2
	b.ge	2f
#endif

	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
1:	wfe
	wfi
	b	1b

2:	ret
SYM_FUNC_END(__cpu_secondary_check52bitva)
#endif

SYM_FUNC_START_LOCAL(__no_granule_support)
	/* Indicate that this CPU can't boot and is stuck in the kernel */
	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
1:
	wfe
	wfi
	b	1b
SYM_FUNC_END(__no_granule_support)

SYM_FUNC_START_LOCAL(__primary_switch)
	adrp	x1, reserved_pg_dir
	adrp	x2, init_idmap_pg_dir
	bl	__enable_mmu

	adrp	x1, early_init_stack
	mov	sp, x1
	mov	x29, xzr
	mov	x0, x20				// pass the full boot status
	mov	x1, x21				// pass the FDT
	bl	__pi_early_map_kernel		// Map and relocate the kernel

	ldr	x8, =__primary_switched
	adrp	x0, KERNEL_START		// __pa(KERNEL_START)
	br	x8
SYM_FUNC_END(__primary_switch)
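/*
 * Note on the handover above: __primary_switch runs from the ID map with
 * init_idmap_pg_dir in TTBR0 and the empty reserved_pg_dir in TTBR1, so
 * that __pi_early_map_kernel() can build and install the real kernel
 * mapping. The literal load of =__primary_switched yields the link-time
 * (virtual) address, so the final br lands in the kernel's virtual mapping
 * with x0 = __pa(KERNEL_START), as expected by __primary_switched().
 */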