// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/esr.h>

static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u64 esr = 0;

	kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);

	vcpu_write_sys_reg(vcpu, addr, FAR_EL1);

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	vcpu_write_sys_reg(vcpu, esr | ESR_ELx_FSC_EXTABT, ESR_EL1);
}

static void inject_undef64(struct kvm_vcpu *vcpu)
{
	u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);

	/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
}

#define DFSR_FSC_EXTABT_LPAE	0x10
#define DFSR_FSC_EXTABT_nLPAE	0x08
#define DFSR_LPAE		BIT(9)
#define TTBCR_EAE		BIT(31)

static void inject_undef32(struct kvm_vcpu *vcpu)
{
	kvm_pend_exception(vcpu, EXCEPT_AA32_UND);
}

/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException()
 * pseudocode.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
{
	u64 far;
	u32 fsr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
		fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
	} else {
		/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
		fsr = DFSR_FSC_EXTABT_nLPAE;
	}

	far = vcpu_read_sys_reg(vcpu, FAR_EL1);

	if (is_pabt) {
		kvm_pend_exception(vcpu, EXCEPT_AA32_IABT);
		/* IFAR is mapped onto the top half of FAR_EL1 */
		far &= GENMASK(31, 0);
		far |= (u64)addr << 32;
		vcpu_write_sys_reg(vcpu, fsr, IFSR32_EL2);
	} else { /* !iabt */
		kvm_pend_exception(vcpu, EXCEPT_AA32_DABT);
		/* DFAR is mapped onto the bottom half of FAR_EL1 */
		far &= GENMASK(63, 32);
		far |= addr;
		vcpu_write_sys_reg(vcpu, fsr, ESR_EL1);
	}

	vcpu_write_sys_reg(vcpu, far, FAR_EL1);
}
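
/*
 * A worked note on the EC encoding trick in inject_abt64() above (an
 * editorial sketch, not from the original sources): the data abort EC
 * values only add bit 2 on top of the corresponding instruction abort
 * ones, so a single OR of ESR_ELx_EC_DABT_LOW converts whichever IABT
 * EC was selected into the matching DABT EC:
 *
 *	ESR_ELx_EC_IABT_LOW (0x20) | ESR_ELx_EC_DABT_LOW (0x24) = 0x24
 *	ESR_ELx_EC_IABT_CUR (0x21) | ESR_ELx_EC_DABT_LOW (0x24) = 0x25
 *
 * which are ESR_ELx_EC_DABT_LOW and ESR_ELx_EC_DABT_CUR respectively,
 * preserving the LOW/CUR distinction without a second if/else.
 */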
/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		inject_abt32(vcpu, false, addr);
	else
		inject_abt64(vcpu, false, addr);
}

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		inject_abt32(vcpu, true, addr);
	else
		inject_abt64(vcpu, true, addr);
}

void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
{
	unsigned long addr, esr;

	addr = kvm_vcpu_get_fault_ipa(vcpu);
	addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);

	if (kvm_vcpu_trap_is_iabt(vcpu))
		kvm_inject_pabt(vcpu, addr);
	else
		kvm_inject_dabt(vcpu, addr);

	/*
	 * If AArch64 or LPAE, set FSC to 0 to indicate an Address
	 * Size Fault at level 0, as if exceeding PARange.
	 *
	 * Non-LPAE guests will only get the external abort, as there
	 * is no way to describe the ASF.
	 */
	if (vcpu_el1_is_32bit(vcpu) &&
	    !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
		return;

	esr = vcpu_read_sys_reg(vcpu, ESR_EL1);
	esr &= ~GENMASK_ULL(5, 0);
	vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
}

/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The vCPU in which to inject the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (vcpu_el1_is_32bit(vcpu))
		inject_undef32(vcpu);
	else
		inject_undef64(vcpu);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
{
	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
	*vcpu_hcr(vcpu) |= HCR_VSE;
}

/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with
 * the remaining ISS all-zeros so that this error is not interpreted as an
 * uncategorized RAS error. Without the RAS Extensions we can't specify an ESR
 * value, so the CPU generates an imp-def value.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
}
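
/*
 * Usage sketch (editorial illustration, not part of the kernel sources):
 * an exit handler that cannot resolve a guest access might reflect the
 * fault back into the guest and resume it. handle_unresolvable_fault()
 * is a hypothetical name; the helpers it calls are the real ones used
 * elsewhere in this file. Returning 1 re-enters the guest, which then
 * takes the pended abort on the next entry.
 *
 *	static int handle_unresolvable_fault(struct kvm_vcpu *vcpu)
 *	{
 *		unsigned long addr = kvm_vcpu_get_fault_ipa(vcpu);
 *
 *		if (kvm_vcpu_trap_is_iabt(vcpu))
 *			kvm_inject_pabt(vcpu, addr);
 *		else
 *			kvm_inject_dabt(vcpu, addr);
 *
 *		return 1;
 *	}
 */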