/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/esr.h>

#define PSTATE_FAULT_BITS_64	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
				 PSR_I_BIT | PSR_D_BIT)
#define EL1_EXCEPT_SYNC_OFFSET	0x200

static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
	unsigned long cpsr;
	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
	bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
	u32 return_offset = (is_thumb) ? 4 : 0;
	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

	cpsr = mode | COMPAT_PSR_I_BIT;

	if (sctlr & (1 << 30))	/* SCTLR.TE: take exceptions in Thumb state */
		cpsr |= COMPAT_PSR_T_BIT;
	if (sctlr & (1 << 25))	/* SCTLR.EE: big-endian on exception entry */
		cpsr |= COMPAT_PSR_E_BIT;

	*vcpu_cpsr(vcpu) = cpsr;

	/* Note: These now point to the banked copies */
	*vcpu_spsr(vcpu) = new_spsr_value;
	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

	/* Branch to exception vector */
	if (sctlr & (1 << 13))	/* SCTLR.V: high exception vectors */
		vect_offset += 0xffff0000;
	else /* always have security exceptions */
		vect_offset += vcpu_cp15(vcpu, c12_VBAR);

	*vcpu_pc(vcpu) = vect_offset;
}

static void inject_undef32(struct kvm_vcpu *vcpu)
{
	prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
}

/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException()
 * pseudocode.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
			 unsigned long addr)
{
	u32 vect_offset;
	u32 *far, *fsr;
	bool is_lpae;

	if (is_pabt) {
		vect_offset = 12;
		far = &vcpu_cp15(vcpu, c6_IFAR);
		fsr = &vcpu_cp15(vcpu, c5_IFSR);
	} else { /* !is_pabt */
		vect_offset = 16;
		far = &vcpu_cp15(vcpu, c6_DFAR);
		fsr = &vcpu_cp15(vcpu, c5_DFSR);
	}

	prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);

	*far = addr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);	/* TTBCR.EAE */
	if (is_lpae)
		*fsr = 1 << 9 | 0x34;	/* long-descriptor format, IMPDEF status */
	else
		*fsr = 0x14;		/* short-descriptor format, IMPDEF status */
}

static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32;
	u32 esr = 0;

	is_aarch32 = vcpu_mode_is_32bit(vcpu);

	*vcpu_spsr(vcpu) = cpsr;
	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);

	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;

	vcpu_sys_reg(vcpu, FAR_EL1) = addr;

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
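	/*
	 * Layout of ESR_EL1 assumed by the syndrome construction below
	 * (see the ARMv8 ARM): EC lives in bits [31:26], IL in bit [25],
	 * and the ISS in bits [24:0]. The only ISS field populated here
	 * is the fault status code, via the external-abort value OR'd
	 * in when the register is finally written.
	 */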
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_EL1_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_EL1_EC_IABT_EL0 << ESR_EL1_EC_SHIFT);
	else
		esr |= (ESR_EL1_EC_IABT_EL1 << ESR_EL1_EC_SHIFT);

	/*
	 * For a data abort, OR in the DABT exception class: the IABT
	 * encodings are bitwise subsets of the DABT ones, so this turns
	 * either EC set above into the matching data abort EC. Note the
	 * shift: without it the value would land in the ISS field.
	 */
	if (!is_iabt)
		esr |= (ESR_EL1_EC_DABT_EL0 << ESR_EL1_EC_SHIFT);

	vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_EL2_EC_xABT_xFSR_EXTABT;
}

static void inject_undef64(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	u32 esr = (ESR_EL1_EC_UNKNOWN << ESR_EL1_EC_SHIFT);

	*vcpu_spsr(vcpu) = cpsr;
	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);

	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;

	/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_EL1_IL;

	vcpu_sys_reg(vcpu, ESR_EL1) = esr;
}

/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR/FAR_EL1
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		inject_abt32(vcpu, false, addr);
	else
		inject_abt64(vcpu, false, addr);
}

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR/FAR_EL1
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		inject_abt32(vcpu, true, addr);
	else
		inject_abt64(vcpu, true, addr);
}

/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The VCPU to receive the undefined exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		inject_undef32(vcpu);
	else
		inject_undef64(vcpu);
}
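
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * real callers of these helpers are the trap handlers in handle_exit.c
 * and the sysreg/MMIO emulation paths. A handler that gives up on
 * emulating a trapped instruction typically looks like the hypothetical
 * example below; returning 1 means "handled, resume the guest" under
 * the arm64 KVM exit handler convention.
 */
static int __maybe_unused handle_unemulated_trap(struct kvm_vcpu *vcpu,
						 struct kvm_run *run)
{
	/* Deliver an UNDEF to the guest's own exception vectors ... */
	kvm_inject_undefined(vcpu);

	/* ... and go straight back into the guest. */
	return 1;
}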