1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (C) 2018 Alexandru Elisei <alexandru.elisei@gmail.com> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 #include <sys/types.h> 30 #include <sys/systm.h> 31 #include <sys/kernel.h> 32 #include <sys/lock.h> 33 34 #include <machine/cpu.h> 35 #include <machine/hypervisor.h> 36 37 #include "arm64.h" 38 #include "reset.h" 39 40 /* 41 * Make the architecturally UNKNOWN value 0. As a bonus, we don't have to 42 * manually set all those RES0 fields. 
 */
#define	ARCH_UNKNOWN		0
#define	set_arch_unknown(reg)	(memset(&(reg), ARCH_UNKNOWN, sizeof(reg)))

/*
 * Reset the guest's EL0/EL1 register state (held in the vcpu's hypctx) to
 * architectural power-on values: registers that reset to an UNKNOWN value
 * are zeroed via set_arch_unknown(), and the registers with mandated reset
 * values (SCTLR_EL1, PMCR_EL0) are set explicitly below.
 */
void
reset_vm_el01_regs(void *vcpu)
{
	struct hypctx *el2ctx;

	el2ctx = vcpu;

	/* Zero the guest's saved trap frame. */
	set_arch_unknown(el2ctx->tf);

	set_arch_unknown(el2ctx->actlr_el1);
	set_arch_unknown(el2ctx->afsr0_el1);
	set_arch_unknown(el2ctx->afsr1_el1);
	set_arch_unknown(el2ctx->amair_el1);
	set_arch_unknown(el2ctx->contextidr_el1);
	set_arch_unknown(el2ctx->cpacr_el1);
	set_arch_unknown(el2ctx->csselr_el1);
	set_arch_unknown(el2ctx->elr_el1);
	set_arch_unknown(el2ctx->esr_el1);
	set_arch_unknown(el2ctx->far_el1);
	set_arch_unknown(el2ctx->mair_el1);
	set_arch_unknown(el2ctx->mdccint_el1);
	set_arch_unknown(el2ctx->mdscr_el1);
	set_arch_unknown(el2ctx->par_el1);

	/*
	 * Guest starts with:
	 * ~SCTLR_M: MMU off
	 * ~SCTLR_C: data cache off
	 * SCTLR_CP15BEN: memory barrier instruction enable from EL0; RAO/WI
	 * ~SCTLR_I: instruction cache off
	 */
	el2ctx->sctlr_el1 = SCTLR_RES1;
	el2ctx->sctlr_el1 &= ~SCTLR_M & ~SCTLR_C & ~SCTLR_I;
	el2ctx->sctlr_el1 |= SCTLR_CP15BEN;

	set_arch_unknown(el2ctx->sp_el0);
	set_arch_unknown(el2ctx->tcr_el1);
	set_arch_unknown(el2ctx->tpidr_el0);
	set_arch_unknown(el2ctx->tpidr_el1);
	set_arch_unknown(el2ctx->tpidrro_el0);
	set_arch_unknown(el2ctx->ttbr0_el1);
	set_arch_unknown(el2ctx->ttbr1_el1);
	set_arch_unknown(el2ctx->vbar_el1);
	set_arch_unknown(el2ctx->spsr_el1);

	set_arch_unknown(el2ctx->dbgbcr_el1);
	set_arch_unknown(el2ctx->dbgbvr_el1);
	set_arch_unknown(el2ctx->dbgwcr_el1);
	set_arch_unknown(el2ctx->dbgwvr_el1);

	/* Expose the host's event counter count (PMCR_EL0.N) to the guest. */
	el2ctx->pmcr_el0 = READ_SPECIALREG(pmcr_el0) & PMCR_N_MASK;
	/* PMCR_LC is unknown when AArch32 is supported or RES1 otherwise */
	el2ctx->pmcr_el0 |= PMCR_LC;
	set_arch_unknown(el2ctx->pmccntr_el0);
	set_arch_unknown(el2ctx->pmccfiltr_el0);
	set_arch_unknown(el2ctx->pmuserenr_el0);
	set_arch_unknown(el2ctx->pmselr_el0);
	set_arch_unknown(el2ctx->pmxevcntr_el0);
	set_arch_unknown(el2ctx->pmcntenset_el0);
	set_arch_unknown(el2ctx->pmintenset_el1);
	set_arch_unknown(el2ctx->pmovsset_el0);
	memset(el2ctx->pmevcntr_el0, 0, sizeof(el2ctx->pmevcntr_el0));
	memset(el2ctx->pmevtyper_el0, 0, sizeof(el2ctx->pmevtyper_el0));
}

/*
 * Reset the per-vcpu EL2 (hypervisor) control state: the trap configuration
 * (HCR_EL2, HCRX_EL2, MDCR_EL2, CPTR_EL2), the virtual CPU identification
 * registers (VMPIDR_EL2, VPIDR_EL2) and the initial guest PSTATE.
 */
void
reset_vm_el2_regs(void *vcpu)
{
	struct hypctx *el2ctx;
	uint64_t cpu_aff, vcpuid;

	el2ctx = vcpu;
	vcpuid = vcpu_vcpuid(el2ctx->vcpu);

	/*
	 * Set the Hypervisor Configuration Register:
	 *
	 * HCR_RW: use AArch64 for EL1
	 * HCR_TID3: handle ID registers in the vmm to provide a common
	 * set of features on all vcpus
	 * HCR_TWI: Trap WFI to the hypervisor
	 * HCR_BSU_IS: barrier instructions apply to the inner shareable
	 * domain
	 * HCR_FB: broadcast maintenance operations
	 * HCR_AMO: route physical SError interrupts to EL2
	 * HCR_IMO: route physical IRQ interrupts to EL2
	 * HCR_FMO: route physical FIQ interrupts to EL2
	 * HCR_SWIO: turn set/way invalidate into set/way clean and
	 * invalidate
	 * HCR_VM: use stage 2 translation
	 */
	el2ctx->hcr_el2 = HCR_RW | HCR_TID3 | HCR_TWI | HCR_BSU_IS | HCR_FB |
	    HCR_AMO | HCR_IMO | HCR_FMO | HCR_SWIO | HCR_VM;
	if (in_vhe()) {
		el2ctx->hcr_el2 |= HCR_E2H;
	}

	/* Set the Extended Hypervisor Configuration Register */
	el2ctx->hcrx_el2 = 0;
	/* TODO: Trap all extensions we don't support */
	el2ctx->mdcr_el2 = MDCR_EL2_TDOSA | MDCR_EL2_TDRA | MDCR_EL2_TPMS |
	    MDCR_EL2_TTRF;
	/* PMCR_EL0.N is read from MDCR_EL2.HPMN */
	el2ctx->mdcr_el2 |= (el2ctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT;

	el2ctx->vmpidr_el2 = VMPIDR_EL2_RES1;
	/* The guest will detect a multi-core, single-threaded CPU */
	el2ctx->vmpidr_el2 &= ~VMPIDR_EL2_U & ~VMPIDR_EL2_MT;
	/*
	 * Generate the guest MPIDR value. We only support 16 CPUs at affinity
	 * level 0 to simplify the vgicv3 driver (see writing sgi1r_el1).
	 */
	cpu_aff = (vcpuid & 0xf) << MPIDR_AFF0_SHIFT |
	    ((vcpuid >> 4) & 0xff) << MPIDR_AFF1_SHIFT |
	    ((vcpuid >> 12) & 0xff) << MPIDR_AFF2_SHIFT |
	    ((vcpuid >> 20) & 0xff) << MPIDR_AFF3_SHIFT;
	el2ctx->vmpidr_el2 |= cpu_aff;

	/* Use the same CPU identification information as the host */
	el2ctx->vpidr_el2 = CPU_IMPL_TO_MIDR(CPU_IMPL_ARM);
	el2ctx->vpidr_el2 |= CPU_VAR_TO_MIDR(0);
	el2ctx->vpidr_el2 |= CPU_ARCH_TO_MIDR(0xf);
	el2ctx->vpidr_el2 |= CPU_PART_TO_MIDR(CPU_PART_FOUNDATION);
	el2ctx->vpidr_el2 |= CPU_REV_TO_MIDR(0);

	/*
	 * Don't trap accesses to CPACR_EL1, trace, SVE, Advanced SIMD
	 * and floating point functionality to EL2.
	 */
	if (in_vhe())
		el2ctx->cptr_el2 = CPTR_E2H_TRAP_ALL | CPTR_E2H_FPEN;
	else
		el2ctx->cptr_el2 = CPTR_TRAP_ALL & ~CPTR_TFP;
	el2ctx->cptr_el2 &= ~CPTR_TCPAC;
	/*
	 * Disable interrupts in the guest. The guest OS will re-enable
	 * them.
	 */
	el2ctx->tf.tf_spsr = PSR_D | PSR_A | PSR_I | PSR_F;
	/* Use the EL1 stack when taking exceptions to EL1 */
	el2ctx->tf.tf_spsr |= PSR_M_EL1h;
}