/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/cfi_types.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/el2_setup.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

	.text
	.pushsection	.idmap.text, "ax"

	.align	11

SYM_CODE_START(__kvm_hyp_init)
	ventry	.			// Synchronous EL2t
	ventry	.			// IRQ EL2t
	ventry	.			// FIQ EL2t
	ventry	.			// Error EL2t

	ventry	.			// Synchronous EL2h
	ventry	.			// IRQ EL2h
	ventry	.			// FIQ EL2h
	ventry	.			// Error EL2h

	ventry	__do_hyp_init		// Synchronous 64-bit EL1
	ventry	.			// IRQ 64-bit EL1
	ventry	.			// FIQ 64-bit EL1
	ventry	.			// Error 64-bit EL1

	ventry	.			// Synchronous 32-bit EL1
	ventry	.			// IRQ 32-bit EL1
	ventry	.			// FIQ 32-bit EL1
	ventry	.			// Error 32-bit EL1

	/*
	 * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
	 *
	 * x0: SMCCC function ID
	 * x1: struct kvm_nvhe_init_params PA
	 */
__do_hyp_init:
	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.lo	__kvm_handle_stub_hvc

	/* Strip the SMCCC hint bits before matching the function ID */
	bic	x0, x0, #ARM_SMCCC_CALL_HINTS
	mov	x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
	cmp	x0, x3
	b.eq	1f

	mov	x0, #SMCCC_RET_NOT_SUPPORTED
	eret

1:	mov	x0, x1
	mov	x3, lr
	bl	___kvm_hyp_init			// Clobbers x0..x2
	mov	lr, x3

	/* Hello, World! */
	mov	x0, #SMCCC_RET_SUCCESS
	eret
SYM_CODE_END(__kvm_hyp_init)
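
/*
 * A minimal sketch (not part of this file) of the EL1 side of the call
 * above, assuming the standard SMCCC 1.1 HVC conduit; the host kernel
 * issues this from C, roughly along the lines of (the "params" pointer
 * to this CPU's struct kvm_nvhe_init_params is illustrative):
 *
 *	struct arm_smccc_res res;
 *
 *	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init),
 *			  virt_to_phys(params), &res);
 *	// res.a0 is SMCCC_RET_SUCCESS or SMCCC_RET_NOT_SUPPORTED
 */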

/*
 * Initialize EL2 CPU state to sane values.
 *
 * HCR_EL2.E2H must have been initialized already.
 */
SYM_CODE_START_LOCAL(__kvm_init_el2_state)
	init_el2_state				// Clobbers x0..x2
	finalise_el2_state
	ret
SYM_CODE_END(__kvm_init_el2_state)

/*
 * Initialize the hypervisor in EL2.
 *
 * Only uses x0..x2 so as to not clobber callee-saved SMCCC registers
 * and leave x3 for the caller.
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START_LOCAL(___kvm_hyp_init)
	ldr	x1, [x0, #NVHE_INIT_STACK_HYP_VA]
	mov	sp, x1

	ldr	x1, [x0, #NVHE_INIT_MAIR_EL2]
	msr	mair_el2, x1

	ldr	x1, [x0, #NVHE_INIT_HCR_EL2]
	msr	hcr_el2, x1

	mov	x2, #HCR_E2H
	and	x2, x1, x2
	cbz	x2, 1f

	// hVHE: Replay the EL2 setup to account for the E2H bit
	// TPIDR_EL2 is used to preserve x0 across the macro maze...
	isb
	msr	tpidr_el2, x0
	str	lr, [x0, #NVHE_INIT_TMP]

	bl	__kvm_init_el2_state

	mrs	x0, tpidr_el2
	ldr	lr, [x0, #NVHE_INIT_TMP]

1:
	ldr	x1, [x0, #NVHE_INIT_TPIDR_EL2]
	msr	tpidr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_VTTBR]
	msr	vttbr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_VTCR]
	msr	vtcr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_PGD_PA]
	phys_to_ttbr x2, x1
alternative_if ARM64_HAS_CNP
	orr	x2, x2, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x2

	ldr	x0, [x0, #NVHE_INIT_TCR_EL2]
	msr	tcr_el2, x0

	isb

	/* Invalidate the stale TLBs from Bootloader */
	tlbi	alle2
	tlbi	alle1
	dsb	sy

	mov_q	x0, INIT_SCTLR_EL2_MMU_ON
alternative_if ARM64_HAS_ADDRESS_AUTH
	mov_q	x1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
		     SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
	orr	x0, x0, x1
alternative_else_nop_endif

#ifdef CONFIG_ARM64_BTI_KERNEL
alternative_if ARM64_BTI
	orr	x0, x0, #SCTLR_EL2_BT
alternative_else_nop_endif
#endif /* CONFIG_ARM64_BTI_KERNEL */

	msr	sctlr_el2, x0
	isb

	/* Set the host vector */
	ldr	x0, =__kvm_hyp_host_vector
	msr	vbar_el2, x0

	ret
SYM_CODE_END(___kvm_hyp_init)

/*
 * PSCI CPU_ON entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_entry)
	mov	x1, #1				// is_cpu_on = true
	b	__kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_entry)

/*
 * PSCI CPU_SUSPEND / SYSTEM_SUSPEND entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_resume)
	mov	x1, #0				// is_cpu_on = false
	b	__kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_resume)

/*
 * Common code for CPU entry points. Initializes EL2 state and
 * installs the hypervisor before handing over to a C handler.
 *
 * x0: struct kvm_nvhe_init_params PA
 * x1: bool is_cpu_on
 */
SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
	mov	x28, x0				// Stash arguments
	mov	x29, x1

	/* Check that the core was booted in EL2. */
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.eq	2f

	/* The core booted in EL1. KVM cannot be initialized on it. */
1:	wfe
	wfi
	b	1b

2:	msr	SPsel, #1			// We want to use SP_EL{1,2}

	init_el2_hcr	0

	bl	__kvm_init_el2_state

	/* Enable MMU, set vectors and stack. */
	mov	x0, x28
	bl	___kvm_hyp_init			// Clobbers x0..x2

	/* Leave idmap. */
	mov	x0, x29
	ldr	x1, =kvm_host_psci_cpu_entry
	br	x1
SYM_CODE_END(__kvm_hyp_init_cpu)
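
/*
 * For orientation, a hedged sketch of how the two entry points above are
 * reached: the PSCI relay hands their physical address to firmware as the
 * PSCI entry point, conceptually something like (helper and variable names
 * illustrative):
 *
 *	psci_call(PSCI_0_2_FN64_CPU_ON, target_mpidr,
 *		  __hyp_pa(kvm_hyp_cpu_entry), __hyp_pa(init_params));
 *
 * Per the PSCI spec, the woken core starts at the given entry point with
 * x0 holding the context ID, which is how x0 ends up carrying the
 * struct kvm_nvhe_init_params PA here.
 */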

SYM_CODE_START(__kvm_handle_stub_hvc)
	/*
	 * __kvm_handle_stub_hvc is called from __host_hvc via a branch
	 * instruction (br), so we need a BTI J landing pad at the start.
	 */
	bti	j
	cmp	x0, #HVC_SOFT_RESTART
	b.ne	1f

	/* This is where we're about to jump, staying at EL2 */
	msr	elr_el2, x1
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
	msr	spsr_el2, x0

	/* Shuffle the arguments, and don't come back */
	mov	x0, x2
	mov	x1, x3
	mov	x2, x4
	b	reset

1:	cmp	x0, #HVC_RESET_VECTORS
	b.ne	1f

	/*
	 * Set the HVC_RESET_VECTORS return code before entering the common
	 * path so that we do not clobber x0-x2 in case we are coming via
	 * HVC_SOFT_RESTART.
	 */
	mov	x0, xzr
reset:
	/* Reset kvm back to the hyp stub. */
	mov_q	x5, INIT_SCTLR_EL2_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el2, x5
	isb

alternative_if ARM64_KVM_PROTECTED_MODE
	mov_q	x5, HCR_HOST_NVHE_FLAGS
	msr	hcr_el2, x5
alternative_else_nop_endif

	/* Install stub vectors */
	adr_l	x5, __hyp_stub_vectors
	msr	vbar_el2, x5
	eret

1:	/* Bad stub call */
	mov_q	x0, HVC_STUB_ERR
	eret

SYM_CODE_END(__kvm_handle_stub_hvc)

/*
 * void __pkvm_init_switch_pgd(phys_addr_t pgd, unsigned long sp,
 *			       void (*fn)(void));
 *
 * SYM_TYPED_FUNC_START() allows C to call this ID-mapped function indirectly
 * using a physical pointer without triggering a kCFI failure.
 */
SYM_TYPED_FUNC_START(__pkvm_init_switch_pgd)
	/* Turn the MMU off */
	pre_disable_mmu_workaround
	mrs	x3, sctlr_el2
	bic	x4, x3, #SCTLR_ELx_M
	msr	sctlr_el2, x4
	isb

	tlbi	alle2

	/* Install the new pgtables */
	phys_to_ttbr x5, x0
alternative_if ARM64_HAS_CNP
	orr	x5, x5, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x5

	/* Set the new stack pointer */
	mov	sp, x1

	/* And turn the MMU back on! */
	dsb	nsh
	isb
	set_sctlr_el2	x3
	ret	x2
SYM_FUNC_END(__pkvm_init_switch_pgd)

	.popsection
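
/*
 * For reference, a hedged sketch of the C call into __pkvm_init_switch_pgd
 * defined above: per the kCFI note, the caller goes through the function's
 * physical address so execution stays in the idmap while the MMU is
 * retoggled, and control continues at the third argument instead of
 * returning (variable names illustrative):
 *
 *	void (*fn)(phys_addr_t pgd, unsigned long sp, void (*cont)(void));
 *
 *	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
 *	fn(pgd_pa, stack_hyp_va, finalise_fn);	// resumes in finalise_fn
 */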