1/* 2 * Low-level CPU initialisation 3 * Based on arch/arm/kernel/head.S 4 * 5 * Copyright (C) 1994-2002 Russell King 6 * Copyright (C) 2003-2012 ARM Ltd. 7 * Authors: Catalin Marinas <catalin.marinas@arm.com> 8 * Will Deacon <will.deacon@arm.com> 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License version 2 as 12 * published by the Free Software Foundation. 13 * 14 * This program is distributed in the hope that it will be useful, 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * GNU General Public License for more details. 18 * 19 * You should have received a copy of the GNU General Public License 20 * along with this program. If not, see <http://www.gnu.org/licenses/>. 21 */ 22 23#include <linux/linkage.h> 24#include <linux/init.h> 25#include <linux/irqchip/arm-gic-v3.h> 26 27#include <asm/assembler.h> 28#include <asm/boot.h> 29#include <asm/ptrace.h> 30#include <asm/asm-offsets.h> 31#include <asm/cache.h> 32#include <asm/cputype.h> 33#include <asm/elf.h> 34#include <asm/kernel-pgtable.h> 35#include <asm/kvm_arm.h> 36#include <asm/memory.h> 37#include <asm/pgtable-hwdef.h> 38#include <asm/pgtable.h> 39#include <asm/page.h> 40#include <asm/smp.h> 41#include <asm/sysreg.h> 42#include <asm/thread_info.h> 43#include <asm/virt.h> 44 45#define __PHYS_OFFSET (KERNEL_START - TEXT_OFFSET) 46 47#if (TEXT_OFFSET & 0xfff) != 0 48#error TEXT_OFFSET must be at least 4KB aligned 49#elif (PAGE_OFFSET & 0x1fffff) != 0 50#error PAGE_OFFSET must be at least 2MB aligned 51#elif TEXT_OFFSET > 0x1fffff 52#error TEXT_OFFSET must be less than 2MB 53#endif 54 55/* 56 * Kernel startup entry point. 57 * --------------------------- 58 * 59 * The requirements are: 60 * MMU = off, D-cache = off, I-cache = on or off, 61 * x0 = physical address to the FDT blob. 62 * 63 * This code is mostly position independent so you call this at 64 * __pa(PAGE_OFFSET + TEXT_OFFSET). 65 * 66 * Note that the callee-saved registers are used for storing variables 67 * that are useful before the MMU is enabled. The allocations are described 68 * in the entry routines. 69 */ 70 __HEAD 71_head: 72 /* 73 * DO NOT MODIFY. Image header expected by Linux boot-loaders. 74 */ 75#ifdef CONFIG_EFI 76 /* 77 * This add instruction has no meaningful effect except that 78 * its opcode forms the magic "MZ" signature required by UEFI. 79 */ 80 add x13, x18, #0x16 81 b stext 82#else 83 b stext // branch to kernel start, magic 84 .long 0 // reserved 85#endif 86 le64sym _kernel_offset_le // Image load offset from start of RAM, little-endian 87 le64sym _kernel_size_le // Effective size of kernel image, little-endian 88 le64sym _kernel_flags_le // Informative flags, little-endian 89 .quad 0 // reserved 90 .quad 0 // reserved 91 .quad 0 // reserved 92 .byte 0x41 // Magic number, "ARM\x64" 93 .byte 0x52 94 .byte 0x4d 95 .byte 0x64 96#ifdef CONFIG_EFI 97 .long pe_header - _head // Offset to the PE header. 98#else 99 .word 0 // reserved 100#endif 101 102#ifdef CONFIG_EFI 103 .align 3 104pe_header: 105 .ascii "PE" 106 .short 0 107coff_header: 108 .short 0xaa64 // AArch64 109 .short 2 // nr_sections 110 .long 0 // TimeDateStamp 111 .long 0 // PointerToSymbolTable 112 .long 1 // NumberOfSymbols 113 .short section_table - optional_header // SizeOfOptionalHeader 114 .short 0x206 // Characteristics. 115 // IMAGE_FILE_DEBUG_STRIPPED | 116 // IMAGE_FILE_EXECUTABLE_IMAGE | 117 // IMAGE_FILE_LINE_NUMS_STRIPPED 118optional_header: 119 .short 0x20b // PE32+ format 120 .byte 0x02 // MajorLinkerVersion 121 .byte 0x14 // MinorLinkerVersion 122 .long _end - efi_header_end // SizeOfCode 123 .long 0 // SizeOfInitializedData 124 .long 0 // SizeOfUninitializedData 125 .long __efistub_entry - _head // AddressOfEntryPoint 126 .long efi_header_end - _head // BaseOfCode 127 128extra_header_fields: 129 .quad 0 // ImageBase 130 .long 0x1000 // SectionAlignment 131 .long PECOFF_FILE_ALIGNMENT // FileAlignment 132 .short 0 // MajorOperatingSystemVersion 133 .short 0 // MinorOperatingSystemVersion 134 .short 0 // MajorImageVersion 135 .short 0 // MinorImageVersion 136 .short 0 // MajorSubsystemVersion 137 .short 0 // MinorSubsystemVersion 138 .long 0 // Win32VersionValue 139 140 .long _end - _head // SizeOfImage 141 142 // Everything before the kernel image is considered part of the header 143 .long efi_header_end - _head // SizeOfHeaders 144 .long 0 // CheckSum 145 .short 0xa // Subsystem (EFI application) 146 .short 0 // DllCharacteristics 147 .quad 0 // SizeOfStackReserve 148 .quad 0 // SizeOfStackCommit 149 .quad 0 // SizeOfHeapReserve 150 .quad 0 // SizeOfHeapCommit 151 .long 0 // LoaderFlags 152 .long 0x6 // NumberOfRvaAndSizes 153 154 .quad 0 // ExportTable 155 .quad 0 // ImportTable 156 .quad 0 // ResourceTable 157 .quad 0 // ExceptionTable 158 .quad 0 // CertificationTable 159 .quad 0 // BaseRelocationTable 160 161 // Section table 162section_table: 163 164 /* 165 * The EFI application loader requires a relocation section 166 * because EFI applications must be relocatable. This is a 167 * dummy section as far as we are concerned. 168 */ 169 .ascii ".reloc" 170 .byte 0 171 .byte 0 // end of 0 padding of section name 172 .long 0 173 .long 0 174 .long 0 // SizeOfRawData 175 .long 0 // PointerToRawData 176 .long 0 // PointerToRelocations 177 .long 0 // PointerToLineNumbers 178 .short 0 // NumberOfRelocations 179 .short 0 // NumberOfLineNumbers 180 .long 0x42100040 // Characteristics (section flags) 181 182 183 .ascii ".text" 184 .byte 0 185 .byte 0 186 .byte 0 // end of 0 padding of section name 187 .long _end - efi_header_end // VirtualSize 188 .long efi_header_end - _head // VirtualAddress 189 .long _edata - efi_header_end // SizeOfRawData 190 .long efi_header_end - _head // PointerToRawData 191 192 .long 0 // PointerToRelocations (0 for executables) 193 .long 0 // PointerToLineNumbers (0 for executables) 194 .short 0 // NumberOfRelocations (0 for executables) 195 .short 0 // NumberOfLineNumbers (0 for executables) 196 .long 0xe0500020 // Characteristics (section flags) 197 198 /* 199 * EFI will load .text onwards at the 4k section alignment 200 * described in the PE/COFF header. To ensure that instruction 201 * sequences using an adrp and a :lo12: immediate will function 202 * correctly at this alignment, we must ensure that .text is 203 * placed at a 4k boundary in the Image to begin with. 204 */ 205 .align 12 206efi_header_end: 207#endif 208 209 __INIT 210 211 /* 212 * The following callee saved general purpose registers are used on the 213 * primary lowlevel boot path: 214 * 215 * Register Scope Purpose 216 * x21 stext() .. start_kernel() FDT pointer passed at boot in x0 217 * x23 stext() .. start_kernel() physical misalignment/KASLR offset 218 * x28 __create_page_tables() callee preserved temp register 219 * x19/x20 __primary_switch() callee preserved temp registers 220 */ 221ENTRY(stext) 222 bl preserve_boot_args 223 bl el2_setup // Drop to EL1, w0=cpu_boot_mode 224 adrp x23, __PHYS_OFFSET 225 and x23, x23, MIN_KIMG_ALIGN - 1 // KASLR offset, defaults to 0 226 bl set_cpu_boot_mode_flag 227 bl __create_page_tables 228 /* 229 * The following calls CPU setup code, see arch/arm64/mm/proc.S for 230 * details. 231 * On return, the CPU will be ready for the MMU to be turned on and 232 * the TCR will have been set. 233 */ 234 bl __cpu_setup // initialise processor 235 b __primary_switch 236ENDPROC(stext) 237 238/* 239 * Preserve the arguments passed by the bootloader in x0 .. x3 240 */ 241preserve_boot_args: 242 mov x21, x0 // x21=FDT 243 244 adr_l x0, boot_args // record the contents of 245 stp x21, x1, [x0] // x0 .. x3 at kernel entry 246 stp x2, x3, [x0, #16] 247 248 dmb sy // needed before dc ivac with 249 // MMU off 250 251 add x1, x0, #0x20 // 4 x 8 bytes 252 b __inval_cache_range // tail call 253ENDPROC(preserve_boot_args) 254 255/* 256 * Macro to create a table entry to the next page. 257 * 258 * tbl: page table address 259 * virt: virtual address 260 * shift: #imm page table shift 261 * ptrs: #imm pointers per table page 262 * 263 * Preserves: virt 264 * Corrupts: tmp1, tmp2 265 * Returns: tbl -> next level table page address 266 */ 267 .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2 268 lsr \tmp1, \virt, #\shift 269 and \tmp1, \tmp1, #\ptrs - 1 // table index 270 add \tmp2, \tbl, #PAGE_SIZE 271 orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type 272 str \tmp2, [\tbl, \tmp1, lsl #3] 273 add \tbl, \tbl, #PAGE_SIZE // next level table page 274 .endm 275 276/* 277 * Macro to populate the PGD (and possibily PUD) for the corresponding 278 * block entry in the next level (tbl) for the given virtual address. 279 * 280 * Preserves: tbl, next, virt 281 * Corrupts: tmp1, tmp2 282 */ 283 .macro create_pgd_entry, tbl, virt, tmp1, tmp2 284 create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2 285#if SWAPPER_PGTABLE_LEVELS > 3 286 create_table_entry \tbl, \virt, PUD_SHIFT, PTRS_PER_PUD, \tmp1, \tmp2 287#endif 288#if SWAPPER_PGTABLE_LEVELS > 2 289 create_table_entry \tbl, \virt, SWAPPER_TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2 290#endif 291 .endm 292 293/* 294 * Macro to populate block entries in the page table for the start..end 295 * virtual range (inclusive). 296 * 297 * Preserves: tbl, flags 298 * Corrupts: phys, start, end, pstate 299 */ 300 .macro create_block_map, tbl, flags, phys, start, end 301 lsr \phys, \phys, #SWAPPER_BLOCK_SHIFT 302 lsr \start, \start, #SWAPPER_BLOCK_SHIFT 303 and \start, \start, #PTRS_PER_PTE - 1 // table index 304 orr \phys, \flags, \phys, lsl #SWAPPER_BLOCK_SHIFT // table entry 305 lsr \end, \end, #SWAPPER_BLOCK_SHIFT 306 and \end, \end, #PTRS_PER_PTE - 1 // table end index 3079999: str \phys, [\tbl, \start, lsl #3] // store the entry 308 add \start, \start, #1 // next entry 309 add \phys, \phys, #SWAPPER_BLOCK_SIZE // next block 310 cmp \start, \end 311 b.ls 9999b 312 .endm 313 314/* 315 * Setup the initial page tables. We only setup the barest amount which is 316 * required to get the kernel running. The following sections are required: 317 * - identity mapping to enable the MMU (low address, TTBR0) 318 * - first few MB of the kernel linear mapping to jump to once the MMU has 319 * been enabled 320 */ 321__create_page_tables: 322 mov x28, lr 323 324 /* 325 * Invalidate the idmap and swapper page tables to avoid potential 326 * dirty cache lines being evicted. 327 */ 328 adrp x0, idmap_pg_dir 329 adrp x1, swapper_pg_dir + SWAPPER_DIR_SIZE 330 bl __inval_cache_range 331 332 /* 333 * Clear the idmap and swapper page tables. 334 */ 335 adrp x0, idmap_pg_dir 336 adrp x6, swapper_pg_dir + SWAPPER_DIR_SIZE 3371: stp xzr, xzr, [x0], #16 338 stp xzr, xzr, [x0], #16 339 stp xzr, xzr, [x0], #16 340 stp xzr, xzr, [x0], #16 341 cmp x0, x6 342 b.lo 1b 343 344 mov x7, SWAPPER_MM_MMUFLAGS 345 346 /* 347 * Create the identity mapping. 348 */ 349 adrp x0, idmap_pg_dir 350 adrp x3, __idmap_text_start // __pa(__idmap_text_start) 351 352#ifndef CONFIG_ARM64_VA_BITS_48 353#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3) 354#define EXTRA_PTRS (1 << (48 - EXTRA_SHIFT)) 355 356 /* 357 * If VA_BITS < 48, it may be too small to allow for an ID mapping to be 358 * created that covers system RAM if that is located sufficiently high 359 * in the physical address space. So for the ID map, use an extended 360 * virtual range in that case, by configuring an additional translation 361 * level. 362 * First, we have to verify our assumption that the current value of 363 * VA_BITS was chosen such that all translation levels are fully 364 * utilised, and that lowering T0SZ will always result in an additional 365 * translation level to be configured. 366 */ 367#if VA_BITS != EXTRA_SHIFT 368#error "Mismatch between VA_BITS and page size/number of translation levels" 369#endif 370 371 /* 372 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the 373 * entire ID map region can be mapped. As T0SZ == (64 - #bits used), 374 * this number conveniently equals the number of leading zeroes in 375 * the physical address of __idmap_text_end. 376 */ 377 adrp x5, __idmap_text_end 378 clz x5, x5 379 cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough? 380 b.ge 1f // .. then skip additional level 381 382 adr_l x6, idmap_t0sz 383 str x5, [x6] 384 dmb sy 385 dc ivac, x6 // Invalidate potentially stale cache line 386 387 create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6 3881: 389#endif 390 391 create_pgd_entry x0, x3, x5, x6 392 mov x5, x3 // __pa(__idmap_text_start) 393 adr_l x6, __idmap_text_end // __pa(__idmap_text_end) 394 create_block_map x0, x7, x3, x5, x6 395 396 /* 397 * Map the kernel image (starting with PHYS_OFFSET). 398 */ 399 adrp x0, swapper_pg_dir 400 mov_q x5, KIMAGE_VADDR + TEXT_OFFSET // compile time __va(_text) 401 add x5, x5, x23 // add KASLR displacement 402 create_pgd_entry x0, x5, x3, x6 403 adrp x6, _end // runtime __pa(_end) 404 adrp x3, _text // runtime __pa(_text) 405 sub x6, x6, x3 // _end - _text 406 add x6, x6, x5 // runtime __va(_end) 407 create_block_map x0, x7, x3, x5, x6 408 409 /* 410 * Since the page tables have been populated with non-cacheable 411 * accesses (MMU disabled), invalidate the idmap and swapper page 412 * tables again to remove any speculatively loaded cache lines. 413 */ 414 adrp x0, idmap_pg_dir 415 adrp x1, swapper_pg_dir + SWAPPER_DIR_SIZE 416 dmb sy 417 bl __inval_cache_range 418 419 ret x28 420ENDPROC(__create_page_tables) 421 .ltorg 422 423/* 424 * The following fragment of code is executed with the MMU enabled. 425 * 426 * x0 = __PHYS_OFFSET 427 */ 428__primary_switched: 429 adrp x4, init_thread_union 430 add sp, x4, #THREAD_SIZE 431 msr sp_el0, x4 // Save thread_info 432 433 adr_l x8, vectors // load VBAR_EL1 with virtual 434 msr vbar_el1, x8 // vector table address 435 isb 436 437 stp xzr, x30, [sp, #-16]! 438 mov x29, sp 439 440 str_l x21, __fdt_pointer, x5 // Save FDT pointer 441 442 ldr_l x4, kimage_vaddr // Save the offset between 443 sub x4, x4, x0 // the kernel virtual and 444 str_l x4, kimage_voffset, x5 // physical mappings 445 446 // Clear BSS 447 adr_l x0, __bss_start 448 mov x1, xzr 449 adr_l x2, __bss_stop 450 sub x2, x2, x0 451 bl __pi_memset 452 dsb ishst // Make zero page visible to PTW 453 454#ifdef CONFIG_KASAN 455 bl kasan_early_init 456#endif 457#ifdef CONFIG_RANDOMIZE_BASE 458 tst x23, ~(MIN_KIMG_ALIGN - 1) // already running randomized? 459 b.ne 0f 460 mov x0, x21 // pass FDT address in x0 461 mov x1, x23 // pass modulo offset in x1 462 bl kaslr_early_init // parse FDT for KASLR options 463 cbz x0, 0f // KASLR disabled? just proceed 464 orr x23, x23, x0 // record KASLR offset 465 ldp x29, x30, [sp], #16 // we must enable KASLR, return 466 ret // to __primary_switch() 4670: 468#endif 469 b start_kernel 470ENDPROC(__primary_switched) 471 472/* 473 * end early head section, begin head code that is also used for 474 * hotplug and needs to have the same protections as the text region 475 */ 476 .section ".idmap.text","ax" 477 478ENTRY(kimage_vaddr) 479 .quad _text - TEXT_OFFSET 480 481/* 482 * If we're fortunate enough to boot at EL2, ensure that the world is 483 * sane before dropping to EL1. 484 * 485 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x20 if 486 * booted in EL1 or EL2 respectively. 487 */ 488ENTRY(el2_setup) 489 mrs x0, CurrentEL 490 cmp x0, #CurrentEL_EL2 491 b.ne 1f 492 mrs x0, sctlr_el2 493CPU_BE( orr x0, x0, #(1 << 25) ) // Set the EE bit for EL2 494CPU_LE( bic x0, x0, #(1 << 25) ) // Clear the EE bit for EL2 495 msr sctlr_el2, x0 496 b 2f 4971: mrs x0, sctlr_el1 498CPU_BE( orr x0, x0, #(3 << 24) ) // Set the EE and E0E bits for EL1 499CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1 500 msr sctlr_el1, x0 501 mov w0, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1 502 isb 503 ret 504 5052: 506#ifdef CONFIG_ARM64_VHE 507 /* 508 * Check for VHE being present. For the rest of the EL2 setup, 509 * x2 being non-zero indicates that we do have VHE, and that the 510 * kernel is intended to run at EL2. 511 */ 512 mrs x2, id_aa64mmfr1_el1 513 ubfx x2, x2, #8, #4 514#else 515 mov x2, xzr 516#endif 517 518 /* Hyp configuration. */ 519 mov x0, #HCR_RW // 64-bit EL1 520 cbz x2, set_hcr 521 orr x0, x0, #HCR_TGE // Enable Host Extensions 522 orr x0, x0, #HCR_E2H 523set_hcr: 524 msr hcr_el2, x0 525 isb 526 527 /* Generic timers. */ 528 mrs x0, cnthctl_el2 529 orr x0, x0, #3 // Enable EL1 physical timers 530 msr cnthctl_el2, x0 531 msr cntvoff_el2, xzr // Clear virtual offset 532 533#ifdef CONFIG_ARM_GIC_V3 534 /* GICv3 system register access */ 535 mrs x0, id_aa64pfr0_el1 536 ubfx x0, x0, #24, #4 537 cmp x0, #1 538 b.ne 3f 539 540 mrs_s x0, ICC_SRE_EL2 541 orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1 542 orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1 543 msr_s ICC_SRE_EL2, x0 544 isb // Make sure SRE is now set 545 mrs_s x0, ICC_SRE_EL2 // Read SRE back, 546 tbz x0, #0, 3f // and check that it sticks 547 msr_s ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults 548 5493: 550#endif 551 552 /* Populate ID registers. */ 553 mrs x0, midr_el1 554 mrs x1, mpidr_el1 555 msr vpidr_el2, x0 556 msr vmpidr_el2, x1 557 558 /* 559 * When VHE is not in use, early init of EL2 and EL1 needs to be 560 * done here. 561 * When VHE _is_ in use, EL1 will not be used in the host and 562 * requires no configuration, and all non-hyp-specific EL2 setup 563 * will be done via the _EL1 system register aliases in __cpu_setup. 564 */ 565 cbnz x2, 1f 566 567 /* sctlr_el1 */ 568 mov x0, #0x0800 // Set/clear RES{1,0} bits 569CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems 570CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems 571 msr sctlr_el1, x0 572 573 /* Coprocessor traps. */ 574 mov x0, #0x33ff 575 msr cptr_el2, x0 // Disable copro. traps to EL2 5761: 577 578#ifdef CONFIG_COMPAT 579 msr hstr_el2, xzr // Disable CP15 traps to EL2 580#endif 581 582 /* EL2 debug */ 583 mrs x0, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer 584 sbfx x0, x0, #8, #4 585 cmp x0, #1 586 b.lt 4f // Skip if no PMU present 587 mrs x0, pmcr_el0 // Disable debug access traps 588 ubfx x0, x0, #11, #5 // to EL2 and allow access to 5894: 590 csel x0, xzr, x0, lt // all PMU counters from EL1 591 msr mdcr_el2, x0 // (if they exist) 592 593 /* Stage-2 translation */ 594 msr vttbr_el2, xzr 595 596 cbz x2, install_el2_stub 597 598 mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2 599 isb 600 ret 601 602install_el2_stub: 603 /* Hypervisor stub */ 604 adrp x0, __hyp_stub_vectors 605 add x0, x0, #:lo12:__hyp_stub_vectors 606 msr vbar_el2, x0 607 608 /* spsr */ 609 mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ 610 PSR_MODE_EL1h) 611 msr spsr_el2, x0 612 msr elr_el2, lr 613 mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2 614 eret 615ENDPROC(el2_setup) 616 617/* 618 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed 619 * in x20. See arch/arm64/include/asm/virt.h for more info. 620 */ 621set_cpu_boot_mode_flag: 622 adr_l x1, __boot_cpu_mode 623 cmp w0, #BOOT_CPU_MODE_EL2 624 b.ne 1f 625 add x1, x1, #4 6261: str w0, [x1] // This CPU has booted in EL1 627 dmb sy 628 dc ivac, x1 // Invalidate potentially stale cache line 629 ret 630ENDPROC(set_cpu_boot_mode_flag) 631 632/* 633 * These values are written with the MMU off, but read with the MMU on. 634 * Writers will invalidate the corresponding address, discarding up to a 635 * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures 636 * sufficient alignment that the CWG doesn't overlap another section. 637 */ 638 .pushsection ".mmuoff.data.write", "aw" 639/* 640 * We need to find out the CPU boot mode long after boot, so we need to 641 * store it in a writable variable. 642 * 643 * This is not in .bss, because we set it sufficiently early that the boot-time 644 * zeroing of .bss would clobber it. 645 */ 646ENTRY(__boot_cpu_mode) 647 .long BOOT_CPU_MODE_EL2 648 .long BOOT_CPU_MODE_EL1 649/* 650 * The booting CPU updates the failed status @__early_cpu_boot_status, 651 * with MMU turned off. 652 */ 653ENTRY(__early_cpu_boot_status) 654 .long 0 655 656 .popsection 657 658 /* 659 * This provides a "holding pen" for platforms to hold all secondary 660 * cores are held until we're ready for them to initialise. 661 */ 662ENTRY(secondary_holding_pen) 663 bl el2_setup // Drop to EL1, w0=cpu_boot_mode 664 bl set_cpu_boot_mode_flag 665 mrs x0, mpidr_el1 666 mov_q x1, MPIDR_HWID_BITMASK 667 and x0, x0, x1 668 adr_l x3, secondary_holding_pen_release 669pen: ldr x4, [x3] 670 cmp x4, x0 671 b.eq secondary_startup 672 wfe 673 b pen 674ENDPROC(secondary_holding_pen) 675 676 /* 677 * Secondary entry point that jumps straight into the kernel. Only to 678 * be used where CPUs are brought online dynamically by the kernel. 679 */ 680ENTRY(secondary_entry) 681 bl el2_setup // Drop to EL1 682 bl set_cpu_boot_mode_flag 683 b secondary_startup 684ENDPROC(secondary_entry) 685 686secondary_startup: 687 /* 688 * Common entry point for secondary CPUs. 689 */ 690 bl __cpu_setup // initialise processor 691 bl __enable_mmu 692 ldr x8, =__secondary_switched 693 br x8 694ENDPROC(secondary_startup) 695 696__secondary_switched: 697 adr_l x5, vectors 698 msr vbar_el1, x5 699 isb 700 701 adr_l x0, secondary_data 702 ldr x0, [x0, #CPU_BOOT_STACK] // get secondary_data.stack 703 mov sp, x0 704 and x0, x0, #~(THREAD_SIZE - 1) 705 msr sp_el0, x0 // save thread_info 706 mov x29, #0 707 b secondary_start_kernel 708ENDPROC(__secondary_switched) 709 710/* 711 * The booting CPU updates the failed status @__early_cpu_boot_status, 712 * with MMU turned off. 713 * 714 * update_early_cpu_boot_status tmp, status 715 * - Corrupts tmp1, tmp2 716 * - Writes 'status' to __early_cpu_boot_status and makes sure 717 * it is committed to memory. 718 */ 719 720 .macro update_early_cpu_boot_status status, tmp1, tmp2 721 mov \tmp2, #\status 722 adr_l \tmp1, __early_cpu_boot_status 723 str \tmp2, [\tmp1] 724 dmb sy 725 dc ivac, \tmp1 // Invalidate potentially stale cache line 726 .endm 727 728/* 729 * Enable the MMU. 730 * 731 * x0 = SCTLR_EL1 value for turning on the MMU. 732 * 733 * Returns to the caller via x30/lr. This requires the caller to be covered 734 * by the .idmap.text section. 735 * 736 * Checks if the selected granule size is supported by the CPU. 737 * If it isn't, park the CPU 738 */ 739ENTRY(__enable_mmu) 740 mrs x1, ID_AA64MMFR0_EL1 741 ubfx x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4 742 cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED 743 b.ne __no_granule_support 744 update_early_cpu_boot_status 0, x1, x2 745 adrp x1, idmap_pg_dir 746 adrp x2, swapper_pg_dir 747 msr ttbr0_el1, x1 // load TTBR0 748 msr ttbr1_el1, x2 // load TTBR1 749 isb 750 msr sctlr_el1, x0 751 isb 752 /* 753 * Invalidate the local I-cache so that any instructions fetched 754 * speculatively from the PoC are discarded, since they may have 755 * been dynamically patched at the PoU. 756 */ 757 ic iallu 758 dsb nsh 759 isb 760 ret 761ENDPROC(__enable_mmu) 762 763__no_granule_support: 764 /* Indicate that this CPU can't boot and is stuck in the kernel */ 765 update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2 7661: 767 wfe 768 wfi 769 b 1b 770ENDPROC(__no_granule_support) 771 772#ifdef CONFIG_RELOCATABLE 773__relocate_kernel: 774 /* 775 * Iterate over each entry in the relocation table, and apply the 776 * relocations in place. 777 */ 778 ldr w9, =__rela_offset // offset to reloc table 779 ldr w10, =__rela_size // size of reloc table 780 781 mov_q x11, KIMAGE_VADDR // default virtual offset 782 add x11, x11, x23 // actual virtual offset 783 add x9, x9, x11 // __va(.rela) 784 add x10, x9, x10 // __va(.rela) + sizeof(.rela) 785 7860: cmp x9, x10 787 b.hs 1f 788 ldp x11, x12, [x9], #24 789 ldr x13, [x9, #-8] 790 cmp w12, #R_AARCH64_RELATIVE 791 b.ne 0b 792 add x13, x13, x23 // relocate 793 str x13, [x11, x23] 794 b 0b 7951: ret 796ENDPROC(__relocate_kernel) 797#endif 798 799__primary_switch: 800#ifdef CONFIG_RANDOMIZE_BASE 801 mov x19, x0 // preserve new SCTLR_EL1 value 802 mrs x20, sctlr_el1 // preserve old SCTLR_EL1 value 803#endif 804 805 bl __enable_mmu 806#ifdef CONFIG_RELOCATABLE 807 bl __relocate_kernel 808#ifdef CONFIG_RANDOMIZE_BASE 809 ldr x8, =__primary_switched 810 adrp x0, __PHYS_OFFSET 811 blr x8 812 813 /* 814 * If we return here, we have a KASLR displacement in x23 which we need 815 * to take into account by discarding the current kernel mapping and 816 * creating a new one. 817 */ 818 msr sctlr_el1, x20 // disable the MMU 819 isb 820 bl __create_page_tables // recreate kernel mapping 821 822 tlbi vmalle1 // Remove any stale TLB entries 823 dsb nsh 824 825 msr sctlr_el1, x19 // re-enable the MMU 826 isb 827 ic iallu // flush instructions fetched 828 dsb nsh // via old mapping 829 isb 830 831 bl __relocate_kernel 832#endif 833#endif 834 ldr x8, =__primary_switched 835 adrp x0, __PHYS_OFFSET 836 br x8 837ENDPROC(__primary_switch) 838