/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Hypervisor stub
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author:	Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/el2_setup.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

	.text
	.pushsection	.hyp.text, "ax"

	.align 11

SYM_CODE_START(__hyp_stub_vectors)
	ventry	el2_sync_invalid		// Synchronous EL2t
	ventry	el2_irq_invalid			// IRQ EL2t
	ventry	el2_fiq_invalid			// FIQ EL2t
	ventry	el2_error_invalid		// Error EL2t

	ventry	elx_sync			// Synchronous EL2h
	ventry	el2_irq_invalid			// IRQ EL2h
	ventry	el2_fiq_invalid			// FIQ EL2h
	ventry	el2_error_invalid		// Error EL2h

	ventry	elx_sync			// Synchronous 64-bit EL1
	ventry	el1_irq_invalid			// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error_invalid		// Error 64-bit EL1

	ventry	el1_sync_invalid		// Synchronous 32-bit EL1
	ventry	el1_irq_invalid			// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error_invalid		// Error 32-bit EL1
SYM_CODE_END(__hyp_stub_vectors)

	.align 11

SYM_CODE_START_LOCAL(elx_sync)
	cmp	x0, #HVC_SET_VECTORS
	b.ne	1f
	msr	vbar_el2, x1
	b	9f

1:	cmp	x0, #HVC_FINALISE_EL2
	b.eq	__finalise_el2

	cmp	x0, #HVC_GET_ICH_VTR_EL2
	b.ne	2f
	mrs_s	x1, SYS_ICH_VTR_EL2
	b	9f

2:	cmp	x0, #HVC_SOFT_RESTART
	b.ne	3f
	mov	x0, x2
	mov	x2, x4
	mov	x4, x1
	mov	x1, x3
	br	x4				// no return

3:	cmp	x0, #HVC_RESET_VECTORS
	beq	9f				// Nothing to reset!

	/* Someone called kvm_call_hyp() against the hyp-stub... */
	mov_q	x0, HVC_STUB_ERR
	eret

9:	mov	x0, xzr
	eret
SYM_CODE_END(elx_sync)

SYM_CODE_START_LOCAL(__finalise_el2)
	finalise_el2_state

	// nVHE? No way! Give me the real thing!
	// Sanity check: MMU *must* be off
	mrs	x1, sctlr_el2
	tbnz	x1, #0, 1f

	// Needs to be VHE capable, obviously
	check_override id_aa64mmfr1 ID_AA64MMFR1_EL1_VH_SHIFT 0f 1f x1 x2

0:	// Check whether we only want the hypervisor to run VHE, not the kernel
	adr_l	x1, arm64_sw_feature_override
	ldr	x2, [x1, FTR_OVR_VAL_OFFSET]
	ldr	x1, [x1, FTR_OVR_MASK_OFFSET]
	and	x2, x2, x1
	ubfx	x2, x2, #ARM64_SW_FEATURE_OVERRIDE_HVHE, #4
	cbz	x2, 2f

1:	mov_q	x0, HVC_STUB_ERR
	eret
2:
	// Engage the VHE magic!
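	// Note: HCR_HOST_VHE_FLAGS includes HCR_E2H (alongside HCR_RW and
	// HCR_TGE). Once the write below is synchronised, the unsuffixed
	// EL1 register names executed at EL2 refer to the EL2 registers,
	// and the *_EL12 accessors used further down read the EL1 copies
	// that get transferred into their EL2 counterparts.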
	mov_q	x0, HCR_HOST_VHE_FLAGS
	msr_hcr_el2 x0
	isb

	// Use the EL1 allocated stack, per-cpu offset
	mrs	x0, sp_el1
	mov	sp, x0
	mrs	x0, tpidr_el1
	msr	tpidr_el2, x0

	// FP configuration, vectors
	mrs_s	x0, SYS_CPACR_EL12
	msr	cpacr_el1, x0
	mrs_s	x0, SYS_VBAR_EL12
	msr	vbar_el1, x0

	// Use EL2 translations for SPE & TRBE and disable access from EL1
	mrs	x0, mdcr_el2
	bic	x0, x0, #MDCR_EL2_E2PB_MASK
	bic	x0, x0, #MDCR_EL2_E2TB_MASK
	msr	mdcr_el2, x0

	// Transfer the MM state from EL1 to EL2
	mrs_s	x0, SYS_TCR_EL12
	msr	tcr_el1, x0
	mrs_s	x0, SYS_TTBR0_EL12
	msr	ttbr0_el1, x0
	mrs_s	x0, SYS_TTBR1_EL12
	msr	ttbr1_el1, x0
	mrs_s	x0, SYS_MAIR_EL12
	msr	mair_el1, x0
	mrs	x1, REG_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_TCRX_SHIFT, #4
	cbz	x1, .Lskip_tcr2
	mrs	x0, REG_TCR2_EL12
	msr	REG_TCR2_EL1, x0

	// Transfer permission indirection state
	mrs	x1, REG_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
	cbz	x1, .Lskip_indirection
	mrs	x0, REG_PIRE0_EL12
	msr	REG_PIRE0_EL1, x0
	mrs	x0, REG_PIR_EL12
	msr	REG_PIR_EL1, x0

.Lskip_indirection:
.Lskip_tcr2:

	isb

	// Hack the exception return to stay at EL2
	mrs	x0, spsr_el1
	and	x0, x0, #~PSR_MODE_MASK
	mov	x1, #PSR_MODE_EL2h
	orr	x0, x0, x1
	msr	spsr_el1, x0

	b	enter_vhe
SYM_CODE_END(__finalise_el2)

	// At the point where we reach enter_vhe(), we run with
	// the MMU off (which is enforced by __finalise_el2()).
	// We thus need to be in the idmap, or everything will
	// explode when enabling the MMU.

	.pushsection	.idmap.text, "ax"

SYM_CODE_START_LOCAL(enter_vhe)
	// Invalidate TLBs before enabling the MMU
	tlbi	vmalle1
	dsb	nsh
	isb

	// Enable the EL2 S1 MMU, as set up from EL1
	mrs_s	x0, SYS_SCTLR_EL12
	set_sctlr_el1	x0

	// Disable the EL1 S1 MMU for good measure
	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
	msr_s	SYS_SCTLR_EL12, x0

	mov	x0, xzr

	eret
SYM_CODE_END(enter_vhe)

	.popsection

.macro invalid_vector	label
SYM_CODE_START_LOCAL(\label)
	b	\label
SYM_CODE_END(\label)
.endm

	invalid_vector	el2_sync_invalid
	invalid_vector	el2_irq_invalid
	invalid_vector	el2_fiq_invalid
	invalid_vector	el2_error_invalid
	invalid_vector	el1_sync_invalid
	invalid_vector	el1_irq_invalid
	invalid_vector	el1_fiq_invalid
	invalid_vector	el1_error_invalid

	.popsection

/*
 * __hyp_set_vectors: Call this after boot to set the initial hypervisor
 * vectors as part of hypervisor installation.  On an SMP system, this should
 * be called on each CPU.
 *
 * x0 must be the physical address of the new vector table, and must be
 * 2KB aligned.
 *
 * Before calling this, you must check that the stub hypervisor is installed
 * everywhere, by waiting for any secondary CPUs to be brought up and then
 * checking that is_hyp_mode_available() is true.
 *
 * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or
 * something else went wrong... in such cases, trying to install a new
 * hypervisor is unlikely to work as desired.
 *
 * When you call into your shiny new hypervisor, sp_el2 will contain junk,
 * so you will need to set that to something sensible at the new hypervisor's
 * initialisation entry point.
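 *
 * As a rough sketch only (my_hyp_vectors is a made-up symbol, not an
 * in-tree one), a C caller following the rules above would look like:
 *
 *	if (is_hyp_mode_available())
 *		__hyp_set_vectors(__pa_symbol(my_hyp_vectors));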
 */

SYM_FUNC_START(__hyp_set_vectors)
	mov	x1, x0
	mov	x0, #HVC_SET_VECTORS
	hvc	#0
	ret
SYM_FUNC_END(__hyp_set_vectors)

SYM_FUNC_START(__hyp_reset_vectors)
	mov	x0, #HVC_RESET_VECTORS
	hvc	#0
	ret
SYM_FUNC_END(__hyp_reset_vectors)

/*
 * Entry point to finalise EL2 and switch to VHE if deemed capable
 *
 * w0: boot mode, as returned by init_kernel_el()
 */
SYM_FUNC_START(finalise_el2)
	// Need to have booted at EL2
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f

	// and still be at EL1
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL1
	b.ne	1f

	mov	x0, #HVC_FINALISE_EL2
	hvc	#0
1:
	ret
SYM_FUNC_END(finalise_el2)
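
/*
 * Illustrative only (not lifted from an in-tree caller): tearing a
 * hypervisor back down to the stub is the reverse operation,
 *
 *	__hyp_reset_vectors();
 *
 * after which HVC_SET_VECTORS can be used to install a new vector table.
 */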