/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/el2_setup.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

	.text
	.pushsection	.idmap.text, "ax"

	.align	11			// vectors must be 2KB-aligned (VBAR_EL2)

SYM_CODE_START(__kvm_hyp_init)
	ventry	__invalid		// Synchronous EL2t
	ventry	__invalid		// IRQ EL2t
	ventry	__invalid		// FIQ EL2t
	ventry	__invalid		// Error EL2t

	ventry	__invalid		// Synchronous EL2h
	ventry	__invalid		// IRQ EL2h
	ventry	__invalid		// FIQ EL2h
	ventry	__invalid		// Error EL2h

	ventry	__do_hyp_init		// Synchronous 64-bit EL1
	ventry	__invalid		// IRQ 64-bit EL1
	ventry	__invalid		// FIQ 64-bit EL1
	ventry	__invalid		// Error 64-bit EL1

	ventry	__invalid		// Synchronous 32-bit EL1
	ventry	__invalid		// IRQ 32-bit EL1
	ventry	__invalid		// FIQ 32-bit EL1
	ventry	__invalid		// Error 32-bit EL1

__invalid:
	b	.

	/*
	 * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
	 *
	 * x0: SMCCC function ID
	 * x1: struct kvm_nvhe_init_params PA
	 */
__do_hyp_init:
	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.lo	__kvm_handle_stub_hvc

	mov	x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
	cmp	x0, x3
	b.eq	1f

	mov	x0, #SMCCC_RET_NOT_SUPPORTED
	eret

1:	mov	x0, x1
	mov	x3, lr
	bl	___kvm_hyp_init			// Clobbers x0..x2
	mov	lr, x3

	/* Hello, World! */
	mov	x0, #SMCCC_RET_SUCCESS
	eret
SYM_CODE_END(__kvm_hyp_init)

/*
 * Initialize the hypervisor in EL2.
 *
 * Only uses x0..x2 so as to not clobber callee-saved SMCCC registers
 * and leave x3 for the caller.
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START_LOCAL(___kvm_hyp_init)
	ldr	x1, [x0, #NVHE_INIT_TPIDR_EL2]
	msr	tpidr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_STACK_HYP_VA]
	mov	sp, x1

	ldr	x1, [x0, #NVHE_INIT_MAIR_EL2]
	msr	mair_el2, x1

	ldr	x1, [x0, #NVHE_INIT_HCR_EL2]
	msr	hcr_el2, x1

	mov	x2, #HCR_E2H
	and	x2, x1, x2
	cbz	x2, 1f

	/*
	 * When HCR_EL2.E2H is set, CNTHCTL_EL2 uses the VHE layout:
	 * the EL1 physical counter/timer enable bits move from
	 * bits [1:0] to EL1PCTEN/EL1PTEN at bits [11:10]. Clear the
	 * old positions and set the new ones so that EL1 keeps
	 * untrapped access to the physical counter and timer.
	 */
	mrs	x1, cnthctl_el2
	and	x1, x1, #~(BIT(0) | BIT(1))
	orr	x1, x1, #(BIT(10) | BIT(11))
	msr	cnthctl_el2, x1
1:
	ldr	x1, [x0, #NVHE_INIT_VTTBR]
	msr	vttbr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_VTCR]
	msr	vtcr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_PGD_PA]
	phys_to_ttbr x2, x1
alternative_if ARM64_HAS_CNP
	orr	x2, x2, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x2

	/*
	 * Set the PS bits in TCR_EL2.
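	 *
	 * tcr_compute_pa_size reads ID_AA64MMFR0_EL1.PARange and
	 * inserts it, capped at the largest PA size the kernel
	 * supports, into the PS field at TCR_EL2_PS_SHIFT, using
	 * x1 and x2 as scratch.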
	 */
	ldr	x0, [x0, #NVHE_INIT_TCR_EL2]
	tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2
	msr	tcr_el2, x0

	isb

	/* Invalidate the stale TLBs from Bootloader */
	tlbi	alle2
	tlbi	vmalls12e1
	dsb	sy

	mov_q	x0, INIT_SCTLR_EL2_MMU_ON
alternative_if ARM64_HAS_ADDRESS_AUTH
	mov_q	x1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
		     SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
	orr	x0, x0, x1
alternative_else_nop_endif
	msr	sctlr_el2, x0
	isb

	/* Set the host vector */
	ldr	x0, =__kvm_hyp_host_vector
	msr	vbar_el2, x0

	ret
SYM_CODE_END(___kvm_hyp_init)

/*
 * PSCI CPU_ON entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_entry)
	mov	x1, #1				// is_cpu_on = true
	b	__kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_entry)

/*
 * PSCI CPU_SUSPEND / SYSTEM_SUSPEND entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_resume)
	mov	x1, #0				// is_cpu_on = false
	b	__kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_resume)

/*
 * Common code for CPU entry points. Initializes EL2 state and
 * installs the hypervisor before handing over to a C handler.
 *
 * x0: struct kvm_nvhe_init_params PA
 * x1: bool is_cpu_on
 */
SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
	mov	x28, x0				// Stash arguments
	mov	x29, x1

	/* Check that the core was booted in EL2. */
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.eq	2f

	/* The core booted in EL1. KVM cannot be initialized on it. */
1:	wfe
	wfi
	b	1b

2:	msr	SPsel, #1			// We want to use SP_EL{1,2}

	/* Initialize EL2 CPU state to sane values. */
	init_el2_state				// Clobbers x0..x2
	finalise_el2_state

	/* Enable MMU, set vectors and stack. */
	mov	x0, x28
	bl	___kvm_hyp_init			// Clobbers x0..x2

	/* Leave idmap. */
	mov	x0, x29
	ldr	x1, =kvm_host_psci_cpu_entry
	br	x1
SYM_CODE_END(__kvm_hyp_init_cpu)

SYM_CODE_START(__kvm_handle_stub_hvc)
	cmp	x0, #HVC_SOFT_RESTART
	b.ne	1f

	/* This is where we're about to jump, staying at EL2 */
	msr	elr_el2, x1
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
	msr	spsr_el2, x0

	/* Shuffle the arguments, and don't come back */
	mov	x0, x2
	mov	x1, x3
	mov	x2, x4
	b	reset

1:	cmp	x0, #HVC_RESET_VECTORS
	b.ne	1f

	/*
	 * Set the HVC_RESET_VECTORS return code before entering the common
	 * path so that we do not clobber x0-x2 in case we are coming via
	 * HVC_SOFT_RESTART.
	 */
	mov	x0, xzr
reset:
	/*
	 * Reset kvm back to the hyp stub.
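	 *
	 * With the MMU off, EL2 executes with flat physical
	 * addressing; this code lives in .idmap.text, so execution
	 * continues at the same address, and the stub vectors are
	 * installed by physical address via the PC-relative adr_l.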
	 */
	mov_q	x5, INIT_SCTLR_EL2_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el2, x5
	isb

alternative_if ARM64_KVM_PROTECTED_MODE
	mov_q	x5, HCR_HOST_NVHE_FLAGS
	msr	hcr_el2, x5
alternative_else_nop_endif

	/* Install stub vectors */
	adr_l	x5, __hyp_stub_vectors
	msr	vbar_el2, x5
	eret

1:	/* Bad stub call */
	mov_q	x0, HVC_STUB_ERR
	eret

SYM_CODE_END(__kvm_handle_stub_hvc)

/*
 * Switch to the pKVM page tables.
 *
 * x0: struct kvm_nvhe_init_params PA
 * x1: VA to branch to once the new page tables are in use (see the
 *     final "ret x1")
 */
SYM_FUNC_START(__pkvm_init_switch_pgd)
	/* Turn the MMU off */
	pre_disable_mmu_workaround
	mrs	x2, sctlr_el2
	bic	x3, x2, #SCTLR_ELx_M
	msr	sctlr_el2, x3
	isb

	tlbi	alle2

	/* Install the new pgtables */
	ldr	x3, [x0, #NVHE_INIT_PGD_PA]
	phys_to_ttbr x4, x3
alternative_if ARM64_HAS_CNP
	orr	x4, x4, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x4

	/* Set the new stack pointer */
	ldr	x0, [x0, #NVHE_INIT_STACK_HYP_VA]
	mov	sp, x0

	/* And turn the MMU back on! */
	set_sctlr_el2	x2
	ret	x1
SYM_FUNC_END(__pkvm_init_switch_pgd)

	.popsection