/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Hypervisor stub
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author:	Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/el2_setup.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

// Warning, hardcoded register allocation
// This will clobber x1 and x2, and expect x1 to contain
// the id register value as read from the HW
.macro __check_override idreg, fld, width, pass, fail
	ubfx	x1, x1, #\fld, #\width
	cbz	x1, \fail

	adr_l	x1, \idreg\()_override
	ldr	x2, [x1, FTR_OVR_VAL_OFFSET]
	ldr	x1, [x1, FTR_OVR_MASK_OFFSET]
	ubfx	x2, x2, #\fld, #\width
	ubfx	x1, x1, #\fld, #\width
	cmp	x1, xzr
	and	x2, x2, x1
	csinv	x2, x2, xzr, ne
	cbnz	x2, \pass
	b	\fail
.endm

// Convenience wrapper around __check_override for a 4-bit field:
// reads \idreg fresh from the HW first. Same x1/x2 clobbers.
.macro check_override idreg, fld, pass, fail
	mrs	x1, \idreg\()_el1
	__check_override \idreg \fld 4 \pass \fail
.endm

	.text
	.pushsection	.hyp.text, "ax"

	.align 11

SYM_CODE_START(__hyp_stub_vectors)
	ventry	el2_sync_invalid		// Synchronous EL2t
	ventry	el2_irq_invalid			// IRQ EL2t
	ventry	el2_fiq_invalid			// FIQ EL2t
	ventry	el2_error_invalid		// Error EL2t

	ventry	elx_sync			// Synchronous EL2h
	ventry	el2_irq_invalid			// IRQ EL2h
	ventry	el2_fiq_invalid			// FIQ EL2h
	ventry	el2_error_invalid		// Error EL2h

	ventry	elx_sync			// Synchronous 64-bit EL1
	ventry	el1_irq_invalid			// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error_invalid		// Error 64-bit EL1

	ventry	el1_sync_invalid		// Synchronous 32-bit EL1
	ventry	el1_irq_invalid			// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error_invalid		// Error 32-bit EL1
SYM_CODE_END(__hyp_stub_vectors)

	.align 11

// HVC dispatcher for the stub. x0 carries the HVC_* function ID,
// further arguments (vector address, restart target...) in x1-x4.
// Returns 0 in x0 on success, HVC_STUB_ERR for unknown functions.
SYM_CODE_START_LOCAL(elx_sync)
	cmp	x0, #HVC_SET_VECTORS
	b.ne	1f
	msr	vbar_el2, x1
	b	9f

1:	cmp	x0, #HVC_FINALISE_EL2
	b.eq	__finalise_el2

2:	cmp	x0, #HVC_SOFT_RESTART
	b.ne	3f
	mov	x0, x2
	mov	x2, x4
	mov	x4, x1
	mov	x1, x3
	br	x4				// no return

3:	cmp	x0, #HVC_RESET_VECTORS
	beq	9f				// Nothing to reset!

	/* Someone called kvm_call_hyp() against the hyp-stub... */
	mov_q	x0, HVC_STUB_ERR
	eret

9:	mov	x0, xzr
	eret
SYM_CODE_END(elx_sync)

// Complete the EL2 configuration that was deferred at boot: enable
// SVE/SME access for EL1 where (overridden) ID registers allow it,
// then switch to VHE if this CPU is VHE-capable, or fail with
// HVC_STUB_ERR in x0.
SYM_CODE_START_LOCAL(__finalise_el2)
	check_override id_aa64pfr0 ID_AA64PFR0_EL1_SVE_SHIFT .Linit_sve .Lskip_sve

.Linit_sve:	/* SVE register access */
	mrs	x0, cptr_el2			// Disable SVE traps
	bic	x0, x0, #CPTR_EL2_TZ
	msr	cptr_el2, x0
	isb
	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
	msr_s	SYS_ZCR_EL2, x1			// length for EL1.

.Lskip_sve:
	check_override id_aa64pfr1 ID_AA64PFR1_EL1_SME_SHIFT .Linit_sme .Lskip_sme

.Linit_sme:	/* SME register access and priority mapping */
	mrs	x0, cptr_el2			// Disable SME traps
	bic	x0, x0, #CPTR_EL2_TSM
	msr	cptr_el2, x0
	isb

	mrs	x1, sctlr_el2
	orr	x1, x1, #SCTLR_ELx_ENTP2	// Disable TPIDR2 traps
	msr	sctlr_el2, x1
	isb

	mov	x0, #0				// SMCR controls

	// Full FP in SM?
	mrs_s	x1, SYS_ID_AA64SMFR0_EL1
	__check_override id_aa64smfr0 ID_AA64SMFR0_EL1_FA64_SHIFT 1 .Linit_sme_fa64 .Lskip_sme_fa64

.Linit_sme_fa64:
	orr	x0, x0, SMCR_ELx_FA64_MASK
.Lskip_sme_fa64:

	// ZT0 available?
	// The FA64 __check_override above clobbered x1, and the macro
	// expects x1 to hold the raw ID register: reload it first.
	mrs_s	x1, SYS_ID_AA64SMFR0_EL1
	__check_override id_aa64smfr0 ID_AA64SMFR0_EL1_SMEver_SHIFT 4 .Linit_sme_zt0 .Lskip_sme_zt0
.Linit_sme_zt0:
	orr	x0, x0, SMCR_ELx_EZT0_MASK
.Lskip_sme_zt0:

	orr	x0, x0, #SMCR_ELx_LEN_MASK	// Enable full SME vector
	msr_s	SYS_SMCR_EL2, x0		// length for EL1.

	mrs_s	x1, SYS_SMIDR_EL1		// Priority mapping supported?
	ubfx	x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1
	cbz	x1, .Lskip_sme

	msr_s	SYS_SMPRIMAP_EL2, xzr		// Make all priorities equal

	mrs	x1, id_aa64mmfr1_el1		// HCRX_EL2 present?
	ubfx	x1, x1, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
	cbz	x1, .Lskip_sme

	mrs_s	x1, SYS_HCRX_EL2
	orr	x1, x1, #HCRX_EL2_SMPME_MASK	// Enable priority mapping
	msr_s	SYS_HCRX_EL2, x1

.Lskip_sme:

	// nVHE? No way! Give me the real thing!
	// Sanity check: MMU *must* be off
	mrs	x1, sctlr_el2
	tbnz	x1, #0, 1f

	// Needs to be VHE capable, obviously
	check_override id_aa64mmfr1 ID_AA64MMFR1_EL1_VH_SHIFT 2f 1f

1:	mov_q	x0, HVC_STUB_ERR
	eret
2:
	// Engage the VHE magic!
	mov_q	x0, HCR_HOST_VHE_FLAGS
	msr	hcr_el2, x0
	isb

	// Use the EL1 allocated stack, per-cpu offset
	mrs	x0, sp_el1
	mov	sp, x0
	mrs	x0, tpidr_el1
	msr	tpidr_el2, x0

	// FP configuration, vectors
	mrs_s	x0, SYS_CPACR_EL12
	msr	cpacr_el1, x0
	mrs_s	x0, SYS_VBAR_EL12
	msr	vbar_el1, x0

	// Use EL2 translations for SPE & TRBE and disable access from EL1
	mrs	x0, mdcr_el2
	bic	x0, x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
	bic	x0, x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
	msr	mdcr_el2, x0

	// Transfer the MM state from EL1 to EL2
	mrs_s	x0, SYS_TCR_EL12
	msr	tcr_el1, x0
	mrs_s	x0, SYS_TTBR0_EL12
	msr	ttbr0_el1, x0
	mrs_s	x0, SYS_TTBR1_EL12
	msr	ttbr1_el1, x0
	mrs_s	x0, SYS_MAIR_EL12
	msr	mair_el1, x0
	isb

	// Hack the exception return to stay at EL2
	mrs	x0, spsr_el1
	and	x0, x0, #~PSR_MODE_MASK
	mov	x1, #PSR_MODE_EL2h
	orr	x0, x0, x1
	msr	spsr_el1, x0

	b	enter_vhe
SYM_CODE_END(__finalise_el2)

	// At the point where we reach enter_vhe(), we run with
	// the MMU off (which is enforced by __finalise_el2()).
	// We thus need to be in the idmap, or everything will
	// explode when enabling the MMU.
	.pushsection	.idmap.text, "ax"

// Last leg of the VHE switch: turn the EL2 stage-1 MMU on using the
// SCTLR value staged via SCTLR_EL12, then eret with x0 == 0 so the
// HVC_FINALISE_EL2 caller sees success.
SYM_CODE_START_LOCAL(enter_vhe)
	// Invalidate TLBs before enabling the MMU
	tlbi	vmalle1
	dsb	nsh
	isb

	// Enable the EL2 S1 MMU, as set up from EL1
	mrs_s	x0, SYS_SCTLR_EL12
	set_sctlr_el1	x0

	// Disable the EL1 S1 MMU for a good measure
	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
	msr_s	SYS_SCTLR_EL12, x0

	// Report success (0) back at the HVC call site
	mov	x0, xzr

	eret
SYM_CODE_END(enter_vhe)

	.popsection

// Park an unexpected exception in a tight loop; these vectors are not
// expected to fire while the hyp-stub is installed.
.macro invalid_vector	label
SYM_CODE_START_LOCAL(\label)
	b	\label
SYM_CODE_END(\label)
.endm

	invalid_vector	el2_sync_invalid
	invalid_vector	el2_irq_invalid
	invalid_vector	el2_fiq_invalid
	invalid_vector	el2_error_invalid
	invalid_vector	el1_sync_invalid
	invalid_vector	el1_irq_invalid
	invalid_vector	el1_fiq_invalid
	invalid_vector	el1_error_invalid

	.popsection

/*
 * __hyp_set_vectors: Call this after boot to set the initial hypervisor
 * vectors as part of hypervisor installation. On an SMP system, this should
 * be called on each CPU.
 *
 * x0 must be the physical address of the new vector table, and must be
 * 2KB aligned.
 *
 * Before calling this, you must check that the stub hypervisor is installed
 * everywhere, by waiting for any secondary CPUs to be brought up and then
 * checking that is_hyp_mode_available() is true.
 *
 * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or
 * something else went wrong... in such cases, trying to install a new
 * hypervisor is unlikely to work as desired.
 *
 * When you call into your shiny new hypervisor, sp_el2 will contain junk,
 * so you will need to set that to something sensible at the new hypervisor's
 * initialisation entry point.
 */

// EL1 -> stub: install x0 as the EL2 vector base via HVC_SET_VECTORS.
SYM_FUNC_START(__hyp_set_vectors)
	mov	x1, x0				// stub ABI: vectors PA in x1
	mov	x0, #HVC_SET_VECTORS
	hvc	#0				// handled by elx_sync
	ret
SYM_FUNC_END(__hyp_set_vectors)

// EL1 -> stub: ask for the stub's own vectors to be restored.
SYM_FUNC_START(__hyp_reset_vectors)
	mov	x0, #HVC_RESET_VECTORS
	hvc	#0
	ret
SYM_FUNC_END(__hyp_reset_vectors)

/*
 * Entry point to finalise EL2 and switch to VHE if deemed capable
 *
 * w0: boot mode, as returned by init_kernel_el()
 */
SYM_FUNC_START(finalise_el2)
	// Need to have booted at EL2
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f

	// and still be at EL1
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL1
	b.ne	1f

	// Otherwise, silently do nothing: without the stub installed at
	// EL2 (or when already running at EL2), the HVC would be wrong.
	mov	x0, #HVC_FINALISE_EL2
	hvc	#0
1:
	ret
SYM_FUNC_END(finalise_el2)