/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

	.text
	.pushsection	.hyp.text, "ax"

.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm

ENTRY(__vhe_hyp_call)
	do_el2_call
	/*
	 * We used to rely on having an exception return to get
	 * an implicit isb. In the E2H case, we don't have it anymore.
	 * Rather than changing all the leaf functions, just do it here
	 * before returning to the rest of the kernel.
	 */
	isb
	ret
ENDPROC(__vhe_hyp_call)

el1_sync:				// Guest trapped into EL2
	stp	x0, x1, [sp, #-16]!

alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	x1, esr_el2
alternative_else
	mrs	x1, esr_el1
alternative_endif
	lsr	x0, x1, #ESR_ELx_EC_SHIFT

	cmp	x0, #ESR_ELx_EC_HVC64
	b.ne	el1_trap

	mrs	x1, vttbr_el2		// If vttbr is valid, the 64bit guest
	cbnz	x1, el1_trap		// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5

1:
	/*
	 * Perform the EL2 call
	 */
	kern_hyp_va	x0
	do_el2_call

	eret

el1_trap:
	/*
	 * x0: ESR_EC
	 */
	ldr	x1, [sp, #16 + 8]	// vcpu stored by __guest_enter

	/*
	 * We trap the first access to the FP/SIMD registers to save the
	 * host context and restore the guest context lazily.
	 * If FP/SIMD is not implemented, handle the trap and inject an
	 * undefined instruction exception to the guest.
	 */
alternative_if_not ARM64_HAS_NO_FPSIMD
	cmp	x0, #ESR_ELx_EC_FP_ASIMD
	b.eq	__fpsimd_guest_restore
alternative_else_nop_endif

	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	stp	x0, x1, [sp, #-16]!
	ldr	x1, [sp, #16 + 8]	// vcpu stored by __guest_enter
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	stp	x0, x1, [sp, #-16]!
	ldr	x1, [sp, #16 + 8]	// vcpu stored by __guest_enter
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_error:
	/*
	 * Only two possibilities:
	 * 1) Either we come from the exit path, having just unmasked
	 *    PSTATE.A: change the return code to an EL2 fault, and
	 *    carry on, as we're already in a sane state to handle it.
	 * 2) Or we come from anywhere else, and that's a bug: we panic.
	 *
	 * For (1), x0 contains the original return code and x1 doesn't
	 * contain anything meaningful at that stage. We can reuse them
	 * as temp registers.
	 * For (2), who cares?
	 */
	mrs	x0, elr_el2
	adr	x1, abort_guest_exit_start
	cmp	x0, x1
	adr	x1, abort_guest_exit_end
	ccmp	x0, x1, #4, ne
	b.ne	__hyp_panic
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
	eret

ENTRY(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
ENDPROC(__hyp_do_panic)

ENTRY(__hyp_panic)
	/*
	 * '=kvm_host_cpu_state' is a host VA from the constant pool; it may
	 * not be accessible at this address from EL2. hyp_panic() converts
	 * it with kern_hyp_va() before use.
	 */
	ldr	x0, =kvm_host_cpu_state
	mrs	x1, tpidr_el2
	add	x0, x0, x1
	b	hyp_panic
ENDPROC(__hyp_panic)

.macro invalid_vector	label, target = __hyp_panic
	.align	2
\label:
	b	\target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_sync_invalid
	invalid_vector	el1_irq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

	.align 11

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid	// Synchronous EL2t
	ventry	el2t_irq_invalid	// IRQ EL2t
	ventry	el2t_fiq_invalid	// FIQ EL2t
	ventry	el2t_error_invalid	// Error EL2t

	ventry	el2h_sync_invalid	// Synchronous EL2h
	ventry	el2h_irq_invalid	// IRQ EL2h
	ventry	el2h_fiq_invalid	// FIQ EL2h
	ventry	el2_error		// Error EL2h

	ventry	el1_sync		// Synchronous 64-bit EL1
	ventry	el1_irq			// IRQ 64-bit EL1
	ventry	el1_fiq_invalid		// FIQ 64-bit EL1
	ventry	el1_error		// Error 64-bit EL1

	ventry	el1_sync		// Synchronous 32-bit EL1
	ventry	el1_irq			// IRQ 32-bit EL1
	ventry	el1_fiq_invalid		// FIQ 32-bit EL1
	ventry	el1_error		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)
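
The do_el2_call convention above is simple: the caller puts a function pointer in x0 and up to three arguments in x1-x3, and the EL2 side shifts the arguments down one slot before the indirect branch (the pointer having already been converted with kern_hyp_va at the "1:" label). A minimal standalone C sketch of that shuffle follows; all names in it (dispatch_el2_call, demo_add) are hypothetical and only model the register contract, not the actual HVC path the kernel uses to reach this code.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t (*el2_fn_t)(uint64_t, uint64_t, uint64_t);

/*
 * Models do_el2_call: x0 carries the function pointer, x1-x3 carry
 * the parameters, which are shifted down before the call.
 */
static uint64_t dispatch_el2_call(uint64_t x0, uint64_t x1,
				  uint64_t x2, uint64_t x3)
{
	el2_fn_t fn = (el2_fn_t)x0;	/* mov lr, x0 */

	/* mov x0, x1; mov x1, x2; mov x2, x3; blr lr */
	return fn(x1, x2, x3);
}

static uint64_t demo_add(uint64_t a, uint64_t b, uint64_t c)
{
	return a + b + c;
}

int main(void)
{
	uint64_t ret = dispatch_el2_call((uint64_t)demo_add, 1, 2, 3);

	printf("%llu\n", (unsigned long long)ret);	/* prints 6 */
	return 0;
}

In the kernel itself this calling convention is exercised from the host side through the kvm_call_hyp()/__kvm_call_hyp() path, which issues the HVC (or, with VHE, branches to __vhe_hyp_call above instead of trapping).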