/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

#include <asm/asm_pointer_auth.h>
#include <asm/assembler.h>
#include <asm/boot.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/el2_setup.h>
#include <asm/elf.h>
#include <asm/image.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/scs.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/stacktrace/frame.h>
#include <asm/thread_info.h>
#include <asm/virt.h>

#include "efi-header.S"

#if (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
	efi_signature_nop		// special NOP to identify as PE/COFF executable
	b	primary_entry		// branch to kernel start, magic
	.quad	0			// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le		// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le	// Informative flags, little-endian
	.quad	0			// reserved
	.quad	0			// reserved
	.quad	0			// reserved
	.ascii	ARM64_IMAGE_MAGIC	// Magic number
	.long	.Lpe_header_offset	// Offset to the PE header.

	__EFI_PE_HEADER
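
	/*
	 * For reference, a bootloader sees the 64-byte header above roughly as
	 * the following C layout (cf. struct arm64_image_header in asm/image.h
	 * and Documentation/arch/arm64/booting.rst); the layout shown here is
	 * an informative sketch, not a definition:
	 *
	 *	struct arm64_image_header {
	 *		u32	code0;			// efi_signature_nop
	 *		u32	code1;			// b primary_entry
	 *		u64	text_offset;		// image load offset, little-endian
	 *		u64	image_size;		// effective image size, little-endian
	 *		u64	flags;			// informative flags, little-endian
	 *		u64	res2, res3, res4;	// reserved
	 *		u32	magic;			// ARM64_IMAGE_MAGIC ("ARM\x64")
	 *		u32	res5;			// offset to the PE header
	 *	};
	 */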

	.section ".idmap.text","a"

	/*
	 * The following callee saved general purpose registers are used on the
	 * primary low-level boot path:
	 *
	 *  Register   Scope                                   Purpose
	 *  x19        primary_entry() .. start_kernel()       whether we entered with the MMU on
	 *  x20        primary_entry() .. __primary_switch()   CPU boot mode
	 *  x21        primary_entry() .. start_kernel()       FDT pointer passed at boot in x0
	 */
SYM_CODE_START(primary_entry)
	bl	record_mmu_state
	bl	preserve_boot_args

	adrp	x1, early_init_stack
	mov	sp, x1
	mov	x29, xzr
	adrp	x0, init_idmap_pg_dir
	mov	x1, xzr
	bl	__pi_create_init_idmap

	/*
	 * If the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate those tables again to
	 * remove any speculatively loaded cache lines.
	 */
	cbnz	x19, 0f
	dmb	sy
	mov	x1, x0				// end of used region
	adrp	x0, init_idmap_pg_dir
	adr_l	x2, dcache_inval_poc
	blr	x2
	b	1f

	/*
	 * If we entered with the MMU and caches on, clean the ID mapped part
	 * of the primary boot code to the PoC so we can safely execute it with
	 * the MMU off.
	 */
0:	adrp	x0, __idmap_text_start
	adr_l	x1, __idmap_text_end
	adr_l	x2, dcache_clean_poc
	blr	x2

1:	mov	x0, x19
	bl	init_kernel_el			// w0=cpu_boot_mode
	mov	x20, x0

	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	bl	__cpu_setup			// initialise processor
	b	__primary_switch
SYM_CODE_END(primary_entry)

	__INIT
SYM_CODE_START_LOCAL(record_mmu_state)
	mrs	x19, CurrentEL
	cmp	x19, #CurrentEL_EL2
	mrs	x19, sctlr_el1
	b.ne	0f
	mrs	x19, sctlr_el2
0:
CPU_LE( tbnz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
CPU_BE( tbz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
	tst	x19, #SCTLR_ELx_C		// Z := (C == 0)
	and	x19, x19, #SCTLR_ELx_M		// isolate M bit
	csel	x19, xzr, x19, eq		// clear x19 if Z
	ret

	/*
	 * Set the correct endianness early so all memory accesses issued
	 * before init_kernel_el() occur in the correct byte order. Note that
	 * this means the MMU must be disabled, or the active ID map will end
	 * up getting interpreted with the wrong byte order.
	 */
1:	eor	x19, x19, #SCTLR_ELx_EE
	bic	x19, x19, #SCTLR_ELx_M
	b.ne	2f
	pre_disable_mmu_workaround
	msr	sctlr_el2, x19
	b	3f
2:	pre_disable_mmu_workaround
	msr	sctlr_el1, x19
3:	isb
	mov	x19, xzr
	ret
SYM_CODE_END(record_mmu_state)

/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
SYM_CODE_START_LOCAL(preserve_boot_args)
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	cbnz	x19, 0f				// skip cache invalidation if MMU is on
	dmb	sy				// needed before dc ivac with
						// MMU off

	add	x1, x0, #0x20			// 4 x 8 bytes
	b	dcache_inval_poc		// tail call
0:	str_l	x19, mmu_enabled_at_boot, x0
	ret
SYM_CODE_END(preserve_boot_args)
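
/*
 * Note: per the arm64 boot protocol (Documentation/arch/arm64/booting.rst),
 * x0 carries the physical address of the FDT and x1 .. x3 are expected to be
 * zero at kernel entry. The values recorded in boot_args above are not acted
 * upon here; roughly speaking, setup_arch() later warns if x1 .. x3 were
 * found to be non-zero (see arch/arm64/kernel/setup.c).
 */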

	/*
	 * Initialize CPU registers with task-specific and cpu-specific context.
	 *
	 * Create a final frame record at task_pt_regs(current)->stackframe, so
	 * that the unwinder can identify the final frame record of any task by
	 * its location in the task stack. We reserve the entire pt_regs space
	 * for consistency with user tasks and kthreads.
	 */
	.macro	init_cpu_task tsk, tmp1, tmp2
	msr	sp_el0, \tsk

	ldr	\tmp1, [\tsk, #TSK_STACK]
	add	sp, \tmp1, #THREAD_SIZE
	sub	sp, sp, #PT_REGS_SIZE

	stp	xzr, xzr, [sp, #S_STACKFRAME]
	mov	\tmp1, #FRAME_META_TYPE_FINAL
	str	\tmp1, [sp, #S_STACKFRAME_TYPE]
	add	x29, sp, #S_STACKFRAME

	scs_load_current

	adr_l	\tmp1, __per_cpu_offset
	ldr	w\tmp2, [\tsk, #TSK_TI_CPU]
	ldr	\tmp1, [\tmp1, \tmp2, lsl #3]
	set_this_cpu_offset \tmp1
	.endm

/*
 * The following fragment of code is executed with the MMU enabled.
 *
 *   x0 = __pa(KERNEL_START)
 */
SYM_FUNC_START_LOCAL(__primary_switched)
	adr_l	x4, init_task
	init_cpu_task x4, x5, x6

	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	stp	x29, x30, [sp, #-16]!
	mov	x29, sp

	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	adrp	x4, _text			// Save the offset between
	sub	x4, x4, x0			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	mov	x0, x20
	bl	set_cpu_boot_mode_flag

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	bl	kasan_early_init
#endif
	mov	x0, x20
	bl	finalise_el2			// Prefer VHE if possible
	ldp	x29, x30, [sp], #16
	bl	start_kernel
	ASM_BUG()
SYM_FUNC_END(__primary_switched)

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".idmap.text","a"

/*
 * Starting from EL2 or EL1, configure the CPU to execute at the highest
 * reachable EL supported by the kernel in a chosen default state. If dropping
 * from EL2 to EL1, configure EL2 before configuring EL1.
 *
 * Since we cannot always rely on ERET synchronizing writes to sysregs (e.g. if
 * SCTLR_ELx.EOS is clear), we place an ISB prior to ERET.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x0 if
 * booted in EL1 or EL2 respectively, with the top 32 bits containing
 * potential context flags. These flags are *not* stored in __boot_cpu_mode.
 *
 * x0: whether we are being called from the primary boot path with the MMU on
 */
SYM_FUNC_START(init_kernel_el)
	mrs	x1, CurrentEL
	cmp	x1, #CurrentEL_EL2
	b.eq	init_el2

SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el1, x0
	isb
	mov_q	x0, INIT_PSTATE_EL1
	msr	spsr_el1, x0
	msr	elr_el1, lr
	mov	w0, #BOOT_CPU_MODE_EL1
	eret

SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
	msr	elr_el2, lr

	// clean all HYP code to the PoC if we booted at EL2 with the MMU on
	cbz	x0, 0f
	adrp	x0, __hyp_idmap_text_start
	adr_l	x1, __hyp_text_end
	adr_l	x2, dcache_clean_poc
	blr	x2

	mov_q	x0, INIT_SCTLR_EL2_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el2, x0
	isb
0:
	mov_q	x0, HCR_HOST_NVHE_FLAGS

	/*
	 * Compliant CPUs advertise their VHE-onlyness with
	 * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
	 * RES1 in that case. Publish the E2H bit early so that
	 * it can be picked up by the init_el2_state macro.
	 *
	 * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
	 * don't advertise it (they predate this relaxation).
	 */
	mrs_s	x1, SYS_ID_AA64MMFR4_EL1
	tbz	x1, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f

	orr	x0, x0, #HCR_E2H
1:
	msr	hcr_el2, x0
	isb

	init_el2_state

	/* Hypervisor stub */
	adr_l	x0, __hyp_stub_vectors
	msr	vbar_el2, x0
	isb

	mov_q	x1, INIT_SCTLR_EL1_MMU_OFF

	mrs	x0, hcr_el2
	and	x0, x0, #HCR_E2H
	cbz	x0, 2f

	/* Set a sane SCTLR_EL1, the VHE way */
	msr_s	SYS_SCTLR_EL12, x1
	mov	x2, #BOOT_CPU_FLAG_E2H
	b	3f

2:
	msr	sctlr_el1, x1
	mov	x2, xzr
3:
	__init_el2_nvhe_prepare_eret

	mov	w0, #BOOT_CPU_MODE_EL2
	orr	x0, x0, x2
	eret
SYM_FUNC_END(init_kernel_el)

	/*
	 * This provides a "holding pen" in which all secondary cores are held
	 * until we're ready for them to initialise.
	 */
SYM_FUNC_START(secondary_holding_pen)
	mov	x0, xzr
	bl	init_kernel_el			// w0=cpu_boot_mode
	mrs	x2, mpidr_el1
	mov_q	x1, MPIDR_HWID_BITMASK
	and	x2, x2, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x2
	b.eq	secondary_startup
	wfe
	b	pen
SYM_FUNC_END(secondary_holding_pen)
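
	/*
	 * Informative note: secondary_holding_pen backs the "spin-table"
	 * enable method. Roughly, the boot CPU (see
	 * arch/arm64/kernel/smp_spin_table.c) writes the target CPU's MPIDR
	 * hardware ID to secondary_holding_pen_release, cleans it to the PoC
	 * and issues an SEV; the CPU whose ID matches then leaves the WFE
	 * loop above and falls through to secondary_startup.
	 */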

	/*
	 * Secondary entry point that jumps straight into the kernel. Only to
	 * be used where CPUs are brought online dynamically by the kernel.
	 */
SYM_FUNC_START(secondary_entry)
	mov	x0, xzr
	bl	init_kernel_el			// w0=cpu_boot_mode
	b	secondary_startup
SYM_FUNC_END(secondary_entry)

SYM_FUNC_START_LOCAL(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 */
	mov	x20, x0				// preserve boot mode

#ifdef CONFIG_ARM64_VA_BITS_52
alternative_if ARM64_HAS_VA52
	bl	__cpu_secondary_check52bitva
alternative_else_nop_endif
#endif

	bl	__cpu_setup			// initialise processor
	adrp	x1, swapper_pg_dir
	adrp	x2, idmap_pg_dir
	bl	__enable_mmu
	ldr	x8, =__secondary_switched
	br	x8
SYM_FUNC_END(secondary_startup)

	.text
SYM_FUNC_START_LOCAL(__secondary_switched)
	mov	x0, x20
	bl	set_cpu_boot_mode_flag

	mov	x0, x20
	bl	finalise_el2

	str_l	xzr, __early_cpu_boot_status, x3
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	adr_l	x0, secondary_data
	ldr	x2, [x0, #CPU_BOOT_TASK]
	cbz	x2, __secondary_too_slow

	init_cpu_task x2, x1, x3

#ifdef CONFIG_ARM64_PTR_AUTH
	ptrauth_keys_init_cpu x2, x3, x4, x5
#endif

	bl	secondary_start_kernel
	ASM_BUG()
SYM_FUNC_END(__secondary_switched)

SYM_FUNC_START_LOCAL(__secondary_too_slow)
	wfe
	wfi
	b	__secondary_too_slow
SYM_FUNC_END(__secondary_too_slow)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w0. See arch/arm64/include/asm/virt.h for more info.
 */
SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
	adr_l	x1, __boot_cpu_mode
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w0, [x1]			// Save CPU boot mode
	ret
SYM_FUNC_END(set_cpu_boot_mode_flag)
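
/*
 * Informative note: __boot_cpu_mode is a pair of 32-bit words (see
 * arch/arm64/include/asm/virt.h). CPUs that entered at EL1 write
 * BOOT_CPU_MODE_EL1 to the first word, while CPUs that entered at EL2 write
 * BOOT_CPU_MODE_EL2 to the second word (hence the '#4' offset above).
 * Helpers such as is_hyp_mode_available() and is_hyp_mode_mismatched()
 * compare the two words to detect whether all CPUs booted at the same
 * exception level.
 */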

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status
 * with the MMU turned off.
 *
 * update_early_cpu_boot_status status, tmp1, tmp2
 *  - Corrupts tmp1, tmp2
 *  - Writes 'status' to __early_cpu_boot_status and makes sure
 *    it is committed to memory.
 */

	.macro	update_early_cpu_boot_status status, tmp1, tmp2
	mov	\tmp2, #\status
	adr_l	\tmp1, __early_cpu_boot_status
	str	\tmp2, [\tmp1]
	dmb	sy
	dc	ivac, \tmp1			// Invalidate potentially stale cache line
	.endm

/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *  x1  = TTBR1_EL1 value
 *  x2  = ID map root table address
 *
 * Returns to the caller via x30/lr. This requires the caller to be covered
 * by the .idmap.text section.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU.
 */
	.section ".idmap.text","a"
SYM_FUNC_START(__enable_mmu)
	mrs	x3, ID_AA64MMFR0_EL1
	ubfx	x3, x3, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
	cmp	x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN
	b.lt	__no_granule_support
	cmp	x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX
	b.gt	__no_granule_support
	phys_to_ttbr x2, x2
	msr	ttbr0_el1, x2			// load TTBR0
	load_ttbr1 x1, x1, x3

	set_sctlr_el1	x0

	ret
SYM_FUNC_END(__enable_mmu)

#ifdef CONFIG_ARM64_VA_BITS_52
SYM_FUNC_START(__cpu_secondary_check52bitva)
#ifndef CONFIG_ARM64_LPA2
	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
	and	x0, x0, ID_AA64MMFR2_EL1_VARange_MASK
	cbnz	x0, 2f
#else
	mrs	x0, id_aa64mmfr0_el1
	sbfx	x0, x0, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
	cmp	x0, #ID_AA64MMFR0_EL1_TGRAN_LPA2
	b.ge	2f
#endif

	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
1:	wfe
	wfi
	b	1b

2:	ret
SYM_FUNC_END(__cpu_secondary_check52bitva)
#endif

SYM_FUNC_START_LOCAL(__no_granule_support)
	/* Indicate that this CPU can't boot and is stuck in the kernel */
	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
1:
	wfe
	wfi
	b	1b
SYM_FUNC_END(__no_granule_support)

SYM_FUNC_START_LOCAL(__primary_switch)
	adrp	x1, reserved_pg_dir
	adrp	x2, init_idmap_pg_dir
	bl	__enable_mmu

	adrp	x1, early_init_stack
	mov	sp, x1
	mov	x29, xzr
	mov	x0, x20				// pass the full boot status
	mov	x1, x21				// pass the FDT
	bl	__pi_early_map_kernel		// Map and relocate the kernel

	ldr	x8, =__primary_switched
	adrp	x0, KERNEL_START		// __pa(KERNEL_START)
	br	x8
SYM_FUNC_END(__primary_switch)
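
/*
 * Informative summary of the boot flows implemented in this file:
 *
 *  Primary CPU:    primary_entry -> record_mmu_state -> preserve_boot_args
 *                  -> __pi_create_init_idmap -> init_kernel_el -> __cpu_setup
 *                  -> __primary_switch -> __enable_mmu -> __pi_early_map_kernel
 *                  -> __primary_switched -> start_kernel()
 *
 *  Secondary CPUs: secondary_holding_pen or secondary_entry -> init_kernel_el
 *                  -> secondary_startup -> __cpu_setup -> __enable_mmu
 *                  -> __secondary_switched -> secondary_start_kernel()
 */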