--- head.S (005e12676af09a308f18cb94aa593bb30dee031e)
+++ head.S (aacd149b62382c63911060b8f64c1e3d89bd405a)
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Low-level CPU initialisation
  * Based on arch/arm/kernel/head.S
  *
  * Copyright (C) 1994-2002 Russell King
  * Copyright (C) 2003-2012 ARM Ltd.
  * Authors: Catalin Marinas <catalin.marinas@arm.com>

--- 72 unchanged lines hidden ---

  * The following callee saved general purpose registers are used on the
  * primary lowlevel boot path:
  *
  * Register   Scope                      Purpose
  * x20        primary_entry() .. __primary_switch()    CPU boot mode
  * x21        primary_entry() .. start_kernel()        FDT pointer passed at boot in x0
  * x22        create_idmap() .. start_kernel()         ID map VA of the DT blob
  * x23        primary_entry() .. start_kernel()        physical misalignment/KASLR offset
- * x24        __primary_switch() .. relocate_kernel()  current RELR displacement
+ * x24        __primary_switch()                       linear map KASLR seed
  * x28        create_idmap()                           callee preserved temp register
  */
 SYM_CODE_START(primary_entry)
 	bl	preserve_boot_args
 	bl	init_kernel_el			// w0=cpu_boot_mode
 	mov	x20, x0
-	adrp	x23, __PHYS_OFFSET
-	and	x23, x23, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
 	bl	create_idmap
 
 	/*
 	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
 	 * details.
 	 * On return, the CPU will be ready for the MMU to be turned on and
 	 * the TCR will have been set.
 	 */
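The two removed lines in this hunk derive the boot-time value of x23 from the low bits of the kernel's physical load address; the new version computes the same value later, in __primary_switch. A minimal C sketch of that derivation, assuming MIN_KIMG_ALIGN is 2 MiB as on arm64, with kimage_phys standing in for the address that adrp x23, __PHYS_OFFSET resolves to:

#include <stdint.h>

#define MIN_KIMG_ALIGN	0x200000	/* 2 MiB minimum kernel image alignment */

/* x23 = load address modulo 2 MiB: zero when the image sits at its
 * default alignment, non-zero when it is physically misaligned. */
static uint64_t boot_phys_offset(uint64_t kimage_phys)
{
	return kimage_phys & (MIN_KIMG_ALIGN - 1);
}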

--- 330 unchanged lines hidden ---

 	// Clear BSS
 	adr_l	x0, __bss_start
 	mov	x1, xzr
 	adr_l	x2, __bss_stop
 	sub	x2, x2, x0
 	bl	__pi_memset
 	dsb	ishst				// Make zero page visible to PTW
 
+#ifdef CONFIG_RANDOMIZE_BASE
+	adrp	x5, memstart_offset_seed	// Save KASLR linear map seed
+	strh	w24, [x5, :lo12:memstart_offset_seed]
+#endif
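Worth noting about the strh above: it stores only the low 16 bits of w24, so the seed kept for the linear map is a halfword. A sketch of the equivalent C, assuming memstart_offset_seed is a u16 as the halfword store suggests:

#include <stdint.h>

static uint16_t memstart_offset_seed;	/* assumed u16, matching the strh */

static void save_memstart_seed(uint64_t x24)
{
	memstart_offset_seed = (uint16_t)x24;	/* strh w24, [x5, :lo12:...] */
}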
 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 	bl	kasan_early_init
 #endif
 	mov	x0, x21				// pass FDT address in x0
 	bl	early_fdt_map			// Try mapping the FDT early
 	mov	x0, x22				// pass FDT address in x0
 	bl	init_feature_override		// Parse cpu feature overrides
-#ifdef CONFIG_RANDOMIZE_BASE
-	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
-	b.ne	0f
-	bl	kaslr_early_init		// parse FDT for KASLR options
-	cbz	x0, 0f				// KASLR disabled? just proceed
-	orr	x23, x23, x0			// record KASLR offset
-	ldp	x29, x30, [sp], #16		// we must enable KASLR, return
-	ret					// to __primary_switch()
-0:
-#endif
 	mov	x0, x20
 	bl	switch_to_vhe			// Prefer VHE if possible
 	ldp	x29, x30, [sp], #16
 	bl	start_kernel
 	ASM_BUG()
 SYM_FUNC_END(__primary_switched)
 
 /*

--- 285 unchanged lines hidden ---

  * word immediately following the initial address, and each bit
  * that follows represents the next word, in linear order. As such,
  * a single bitmap can encode up to 63 relocations in a 64-bit object.
  *
  * In this implementation we store the address of the next RELR table
  * entry in x9, the address being relocated by the current address or
  * bitmap entry in x13 and the address being relocated by the current
  * bit in x14.
- *
- * Because addends are stored in place in the binary, RELR relocations
- * cannot be applied idempotently. We use x24 to keep track of the
- * currently applied displacement so that we can correctly relocate if
- * __relocate_kernel is called twice with non-zero displacements (i.e.
- * if there is both a physical misalignment and a KASLR displacement).
  */
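The encoding described in the comment above is compact but easy to misread. Here is an illustrative RELR decoder in C that mirrors the assembly loop below; it is not kernel source, and disp plays the role of x23 in the new version, which applies the displacement in a single pass:

#include <stddef.h>
#include <stdint.h>

static void apply_relr(const uint64_t *relr, size_t count, uint64_t disp)
{
	uint64_t *where = NULL;

	for (size_t i = 0; i < count; i++) {
		uint64_t entry = relr[i];

		if ((entry & 1) == 0) {
			/* Even entry: link-time address of the next word to
			 * relocate; add disp to get its runtime address,
			 * relocate it, and step past it (str x12, [x13], #8). */
			where = (uint64_t *)(entry + disp);
			*where++ += disp;
		} else {
			/* Odd entry: bitmap covering the next 63 words; bit n
			 * (after the tag bit) covers where[n]. */
			uint64_t *p = where;

			for (entry >>= 1; entry != 0; entry >>= 1, p++)
				if (entry & 1)
					*p += disp;
			where += 63;	/* 63 significant bits per bitmap */
		}
	}
}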
 	adr_l	x9, __relr_start
 	adr_l	x10, __relr_end
 
-	sub	x15, x23, x24			// delta from previous offset
-	cbz	x15, 7f				// nothing to do if unchanged
-	mov	x24, x23			// save new offset
-
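The removed lines are the old double-relocation guard: because RELR addends live in the binary itself, a second pass must apply only the difference between the new and the previously applied displacement. A hedged sketch of that arithmetic, with x15 and x24 modeled as C variables:

#include <stdint.h>

static uint64_t applied;		/* x24: displacement already applied */

/* Returns the delta (x15) to add on this pass; total is x23. */
static uint64_t relr_delta(uint64_t total)
{
	uint64_t delta = total - applied;	/* sub x15, x23, x24 */

	applied = total;			/* mov x24, x23 */
	return delta;
}

/*
 * Pass 1: total = P (physical misalignment), applied = 0, so delta = P.
 * Pass 2: total = P | K with K a 2 MiB-aligned KASLR offset; P and K
 * occupy disjoint bits, so delta = (P | K) - P = K, as required.
 */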
 2:	cmp	x9, x10
 	b.hs	7f
 	ldr	x11, [x9], #8
 	tbnz	x11, #0, 3f			// branch to handle bitmaps
 	add	x13, x11, x23
 	ldr	x12, [x13]			// relocate address entry
-	add	x12, x12, x15
+	add	x12, x12, x23
 	str	x12, [x13], #8			// adjust to start of bitmap
 	b	2b
 
 3:	mov	x14, x13
 4:	lsr	x11, x11, #1
 	cbz	x11, 6f
 	tbz	x11, #0, 5f			// skip bit if not set
 	ldr	x12, [x14]			// relocate bit
-	add	x12, x12, x15
+	add	x12, x12, x23
 	str	x12, [x14]
 
 5:	add	x14, x14, #8			// move to next bit's address
 	b	4b
 
 6:	/*
 	 * Move to the next bitmap's address. 8 is the word size, and 63 is the
 	 * number of significant bits in a bitmap entry.

--- 7 unchanged lines hidden ---

 
 SYM_FUNC_END(__relocate_kernel)
 #endif
 
 SYM_FUNC_START_LOCAL(__primary_switch)
 	adrp	x1, reserved_pg_dir
 	adrp	x2, init_idmap_pg_dir
 	bl	__enable_mmu
-
+#ifdef CONFIG_RELOCATABLE
+	adrp	x23, __PHYS_OFFSET
+	and	x23, x23, MIN_KIMG_ALIGN - 1
+#ifdef CONFIG_RANDOMIZE_BASE
+	mov	x0, x22
+	adrp	x1, init_pg_end
+	mov	sp, x1
+	mov	x29, xzr
+	bl	__pi_kaslr_early_init
+	and	x24, x0, #SZ_2M - 1		// capture memstart offset seed
+	bic	x0, x0, #SZ_2M - 1
+	orr	x23, x23, x0			// record kernel offset
+#endif
+#endif
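The added block packs two values into the return of __pi_kaslr_early_init: the bits below SZ_2M seed the linear-map randomization via x24, and the 2 MiB-aligned remainder becomes the kernel image offset OR'ed into x23. A sketch of the split, with the struct and function names invented for illustration:

#include <stdint.h>

#define SZ_2M	0x200000

struct kaslr_out {
	uint64_t memstart_seed;	/* x24: seeds linear map randomization */
	uint64_t kimg_offset;	/* OR'ed into x23: kernel image offset */
};

static struct kaslr_out split_kaslr_ret(uint64_t x0)
{
	struct kaslr_out out;

	out.memstart_seed = x0 & (SZ_2M - 1);		  /* and x24, x0, #SZ_2M - 1 */
	out.kimg_offset   = x0 & ~(uint64_t)(SZ_2M - 1);  /* bic x0, x0, #SZ_2M - 1 */
	return out;
}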
 	bl	clear_page_tables
 	bl	create_kernel_mapping
 
 	adrp	x1, init_pg_dir
 	load_ttbr1 x1, x1, x2
 #ifdef CONFIG_RELOCATABLE
-#ifdef CONFIG_RELR
-	mov	x24, #0				// no RELR displacement yet
-#endif
 	bl	__relocate_kernel
-#ifdef CONFIG_RANDOMIZE_BASE
-	ldr	x8, =__primary_switched
-	adrp	x0, __PHYS_OFFSET
-	blr	x8
-
-	/*
-	 * If we return here, we have a KASLR displacement in x23 which we need
-	 * to take into account by discarding the current kernel mapping and
-	 * creating a new one.
-	 */
-	adrp	x1, reserved_pg_dir		// Disable translations via TTBR1
-	load_ttbr1 x1, x1, x2
-	bl	clear_page_tables
-	bl	create_kernel_mapping		// Recreate kernel mapping
-
-	tlbi	vmalle1				// Remove any stale TLB entries
-	dsb	nsh
-	isb
-
-	adrp	x1, init_pg_dir			// Re-enable translations via TTBR1
-	load_ttbr1 x1, x1, x2
-	bl	__relocate_kernel
-#endif
 #endif
 	ldr	x8, =__primary_switched
 	adrp	x0, __PHYS_OFFSET
 	br	x8
 SYM_FUNC_END(__primary_switch)
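Taken together, the hunks in __primary_switch change the boot flow from relocate-twice to relocate-once. A pseudocode summary of the two flows, as an editorial reading of the diff rather than source:

/*
 * old: __enable_mmu(); create_kernel_mapping();
 *      __relocate_kernel();            // apply physical misalignment only
 *      __primary_switched();           // may return with KASLR bits in x23
 *      clear_page_tables(); create_kernel_mapping();
 *      __relocate_kernel();            // second pass, delta via x24/x15
 *
 * new: __enable_mmu();
 *      x23 |= __pi_kaslr_early_init(); // decide the offset up front, in C
 *      create_kernel_mapping();
 *      __relocate_kernel();            // single pass, final displacement
 *      __primary_switched();           // never returns
 */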