/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/handle_exit.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_psci.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);

static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret;

	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
			    kvm_vcpu_hvc_get_imm(vcpu));
	vcpu->stat.hvc_exit_stat++;

	ret = kvm_psci_call(vcpu);
	/*
	 * A negative return value means the HVC could not be handled
	 * as a PSCI call; let the guest see an undefined instruction
	 * exception instead.
	 */
	if (ret < 0) {
		kvm_inject_undefined(vcpu);
		return 1;
	}

	return ret;
}
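/*
 * An SMC executed at non-secure EL1 traps to EL2 when HCR_EL2.TSC is
 * set. KVM has no secure firmware services to offer the guest, so
 * make the instruction appear undefined.
 */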
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Guest accesses to the FP/ASIMD registers are routed to this handler
 * only when the system doesn't support FP/ASIMD.
 */
static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/**
 * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
 *		    instruction executed by a guest
 *
 * @vcpu:	the vcpu pointer
 *
 * WFE: Yield the CPU and come back to this vcpu when the scheduler
 * decides to.
 * WFI: Simply call kvm_vcpu_block(), which will halt execution of
 * world-switches and schedule other host processes until there is an
 * incoming IRQ or FIQ to the VM.
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
		vcpu->stat.wfe_exit_stat++;
		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
	} else {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
		vcpu->stat.wfi_exit_stat++;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	}

	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

	return 1;
}

/**
 * kvm_handle_guest_debug - handle a debug exception
 *
 * @vcpu:	the vcpu pointer
 * @run:	access to the kvm_run structure for results
 *
 * We route all debug exceptions through the same handler. If both the
 * guest and host are using the same debug facilities it will be up to
 * userspace to re-inject the correct exception for guest delivery.
 *
 * @return: 0 (while setting run->exit_reason), -1 for error
 */
static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int ret = 0;

	run->exit_reason = KVM_EXIT_DEBUG;
	run->debug.arch.hsr = hsr;

	switch (ESR_ELx_EC(hsr)) {
	case ESR_ELx_EC_WATCHPT_LOW:
		run->debug.arch.far = vcpu->arch.fault.far_el2;
		/* fall through */
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_BKPT32:
	case ESR_ELx_EC_BRK64:
		break;
	default:
		kvm_err("%s: unhandled case hsr: %#08x\n",
			__func__, (unsigned int) hsr);
		ret = -1;
		break;
	}

	return ret;
}

static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u32 hsr = kvm_vcpu_get_hsr(vcpu);

	kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
		      hsr, esr_get_class_string(hsr));

	kvm_inject_undefined(vcpu);
	return 1;
}
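/*
 * Exit handlers, indexed by exception class (ESR_ELx.EC). The range
 * initializer first points every entry at kvm_handle_unknown_ec, so an
 * exception class we don't know about gets reported instead of jumping
 * through a NULL function pointer; the known classes then override
 * their own slots.
 */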
static exit_handle_fn arm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
	[ESR_ELx_EC_CP14_MR]	= kvm_handle_cp14_32,
	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
	[ESR_ELx_EC_HVC32]	= handle_hvc,
	[ESR_ELx_EC_SMC32]	= handle_smc,
	[ESR_ELx_EC_HVC64]	= handle_hvc,
	[ESR_ELx_EC_SMC64]	= handle_smc,
	[ESR_ELx_EC_SYS64]	= kvm_handle_sys_reg,
	[ESR_ELx_EC_IABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_DABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_FP_ASIMD]	= handle_no_fpsimd,
};

static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	u8 hsr_ec = ESR_ELx_EC(hsr);

	return arm_exit_handlers[hsr_ec];
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		int exception_index)
{
	exit_handle_fn exit_handler;

	if (ARM_SERROR_PENDING(exception_index)) {
		u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));

		/*
		 * HVC/SMC already have an adjusted PC (pointing past
		 * the instruction), which we need to wind back so that
		 * the guest resumes at the right place after the
		 * SError has been injected.
		 */
		if (hsr_ec == ESR_ELx_EC_HVC32 || hsr_ec == ESR_ELx_EC_HVC64 ||
		    hsr_ec == ESR_ELx_EC_SMC32 || hsr_ec == ESR_ELx_EC_SMC64) {
			u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
			*vcpu_pc(vcpu) -= adj;
		}

		kvm_inject_vabt(vcpu);
		return 1;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	switch (exception_index) {
	case ARM_EXCEPTION_IRQ:
		return 1;
	case ARM_EXCEPTION_EL1_SERROR:
		kvm_inject_vabt(vcpu);
		return 1;
	case ARM_EXCEPTION_TRAP:
		/*
		 * See ARM ARM B1.14.1: "Hyp traps on instructions
		 * that fail their condition code check"
		 */
		if (!kvm_condition_valid(vcpu)) {
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}

		exit_handler = kvm_get_exit_handler(vcpu);

		return exit_handler(vcpu, run);
	case ARM_EXCEPTION_HYP_GONE:
		/*
		 * EL2 has been reset to the hyp-stub. This happens when a
		 * guest is pre-empted by kvm_reboot()'s shutdown call.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return 0;
	default:
		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}
}
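/*
 * For reference, the caller (kvm_arch_vcpu_ioctl_run() in
 * virt/kvm/arm/arm.c) consumes the return value roughly as follows
 * (a sketch, not the verbatim run loop):
 *
 *	ret = 1;
 *	while (ret > 0) {
 *		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *		ret = handle_exit(vcpu, run, ret);
 *	}
 *	return ret;	// 0: exit to userspace, < 0: error from KVM_RUN
 */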