// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_nested.h>
#include <asm/esr.h>

/*
 * Work out which EL the injected exception targets, expressed as a
 * PSR mode (PSR_MODE_EL1h or PSR_MODE_EL2h).
 */
static unsigned int exception_target_el(struct kvm_vcpu *vcpu)
{
	/* If not nesting, EL1 is the only possible exception target */
	if (likely(!vcpu_has_nv(vcpu)))
		return PSR_MODE_EL1h;

	/*
	 * With NV, we need to pick between EL1 and EL2. Note that we
	 * never deal with a nesting exception here, hence never
	 * changing context, and the exception itself can be delayed
	 * until the next entry.
	 */
	switch(*vcpu_cpsr(vcpu) & PSR_MODE_MASK) {
	case PSR_MODE_EL2h:
	case PSR_MODE_EL2t:
		return PSR_MODE_EL2h;
	case PSR_MODE_EL1h:
	case PSR_MODE_EL1t:
		return PSR_MODE_EL1h;
	case PSR_MODE_EL0t:
		/* From EL0, HCR_EL2.TGE decides whether EL2 gets the fault */
		return vcpu_el2_tge_is_set(vcpu) ?
			PSR_MODE_EL2h : PSR_MODE_EL1h;
	default:
		BUG();
	}
}

/* ESR register of the exception's target EL (ESR_EL1 or ESR_EL2) */
static enum vcpu_sysreg exception_esr_elx(struct kvm_vcpu *vcpu)
{
	if (exception_target_el(vcpu) == PSR_MODE_EL2h)
		return ESR_EL2;

	return ESR_EL1;
}

/* FAR register of the exception's target EL (FAR_EL1 or FAR_EL2) */
static enum vcpu_sysreg exception_far_elx(struct kvm_vcpu *vcpu)
{
	if (exception_target_el(vcpu) == PSR_MODE_EL2h)
		return FAR_EL2;

	return FAR_EL1;
}

/* Pend a synchronous exception at the computed target EL */
static void pend_sync_exception(struct kvm_vcpu *vcpu)
{
	if (exception_target_el(vcpu) == PSR_MODE_EL1h)
		kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
	else
		kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
}

/* Pend an SError at the computed target EL */
static void pend_serror_exception(struct kvm_vcpu *vcpu)
{
	if (exception_target_el(vcpu) == PSR_MODE_EL1h)
		kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SERR);
	else
		kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SERR);
}

/*
 * Evaluate bit @idx of the SCTLR2 register that applies at the
 * exception's target EL, taking the FEAT_SCTLR2 presence and the
 * HCRX_EL2.SCTLR2En nested-virt enable into account. Returns false
 * when the bit has no effect (feature absent or disabled by the
 * guest hypervisor).
 */
static bool __effective_sctlr2_bit(struct kvm_vcpu *vcpu, unsigned int idx)
{
	u64 sctlr2;

	if (!kvm_has_sctlr2(vcpu->kvm))
		return false;

	/* L1 hypervisor can hide SCTLR2 from its own guest */
	if (is_nested_ctxt(vcpu) &&
	    !(__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_SCTLR2En))
		return false;

	if (exception_target_el(vcpu) == PSR_MODE_EL1h)
		sctlr2 = vcpu_read_sys_reg(vcpu, SCTLR2_EL1);
	else
		sctlr2 = vcpu_read_sys_reg(vcpu, SCTLR2_EL2);

	return sctlr2 & BIT(idx);
}

/* SCTLR2_ELx.EASE: external aborts taken as SError (FEAT_DoubleFault2) */
static bool effective_sctlr2_ease(struct kvm_vcpu *vcpu)
{
	return __effective_sctlr2_bit(vcpu, SCTLR2_EL1_EASE_SHIFT);
}

/* SCTLR2_ELx.NMEA: non-maskable external aborts (FEAT_DoubleFault2) */
static bool effective_sctlr2_nmea(struct kvm_vcpu *vcpu)
{
	return __effective_sctlr2_bit(vcpu, SCTLR2_EL1_NMEA_SHIFT);
}

/*
 * Inject a synchronous external abort into an AArch64 guest: pend the
 * exception and populate the target EL's FAR/ESR accordingly.
 *
 * @is_iabt: instruction abort (true) vs data abort (false)
 * @addr:    faulting virtual address, reported in FAR_ELx
 */
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u64 esr = 0, fsc;
	int level;

	/*
	 * If injecting an abort from a failed S1PTW, rewalk the S1 PTs to
	 * find the failing level. If we can't find it, assume the error was
	 * transient and restart without changing the state.
	 */
	if (kvm_vcpu_abt_iss1tw(vcpu)) {
		u64 hpfar = kvm_vcpu_get_fault_ipa(vcpu);
		int ret;

		if (hpfar == INVALID_GPA)
			return;

		ret = __kvm_find_s1_desc_level(vcpu, addr, hpfar, &level);
		if (ret)
			return;

		WARN_ON_ONCE(level < -1 || level > 3);
		fsc = ESR_ELx_FSC_SEA_TTW(level);
	} else {
		fsc = ESR_ELx_FSC_EXTABT;
	}

	/* This delight is brought to you by FEAT_DoubleFault2. */
	if (effective_sctlr2_ease(vcpu))
		pend_serror_exception(vcpu);
	else
		pend_sync_exception(vcpu);

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

	/*
	 * NOTE(review): this OR relies on the DABT EC encodings being a
	 * bitwise superset of the corresponding IABT ones, so OR-ing in
	 * ESR_ELx_EC_DABT_LOW converts the IABT EC set above into the
	 * matching DABT EC while preserving the LOW/CUR distinction —
	 * confirm against the EC values in asm/esr.h.
	 */
	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	esr |= fsc;

	vcpu_write_sys_reg(vcpu, addr, exception_far_elx(vcpu));
	vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
}

/*
 * Pend a synchronous exception with the given ESR value at the target
 * EL. The caller provides a fully-formed ESR (EC, IL, ISS).
 */
void kvm_inject_sync(struct kvm_vcpu *vcpu, u64 esr)
{
	pend_sync_exception(vcpu);
	vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
}

/* Inject an Unknown-reason exception into an AArch64 guest */
static void inject_undef64(struct kvm_vcpu *vcpu)
{
	u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	kvm_inject_sync(vcpu, esr);
}

/* AArch32 DFSR/IFSR encodings used when faking an external abort */
#define DFSR_FSC_EXTABT_LPAE	0x10
#define DFSR_FSC_EXTABT_nLPAE	0x08
#define DFSR_LPAE		BIT(9)
#define TTBCR_EAE		BIT(31)

/* Inject an Undefined Instruction exception into an AArch32 guest */
static void inject_undef32(struct kvm_vcpu *vcpu)
{
	kvm_pend_exception(vcpu, EXCEPT_AA32_UND);
}

/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException
 * pseudocode.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
{
	u64 far;
	u32 fsr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
		fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
	} else {
		/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
		fsr = DFSR_FSC_EXTABT_nLPAE;
	}

	/*
	 * The AArch32 IFAR and DFAR are mapped onto the two halves of
	 * FAR_EL1: update only the half that corresponds to the abort
	 * type being injected, preserving the other.
	 */
	far = vcpu_read_sys_reg(vcpu, FAR_EL1);

	if (is_pabt) {
		kvm_pend_exception(vcpu, EXCEPT_AA32_IABT);
		far &= GENMASK(31, 0);
		far |= (u64)addr << 32;
		vcpu_write_sys_reg(vcpu, fsr, IFSR32_EL2);
	} else { /* !iabt */
		kvm_pend_exception(vcpu, EXCEPT_AA32_DABT);
		far &= GENMASK(63, 32);
		far |= addr;
		vcpu_write_sys_reg(vcpu, fsr, ESR_EL1);
	}

	vcpu_write_sys_reg(vcpu, far, FAR_EL1);
}

/* Dispatch a synchronous external abort to the 32/64bit injector */
static void __kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		inject_abt32(vcpu, iabt, addr);
	else
		inject_abt64(vcpu, iabt, addr);
}

/*
 * Decide whether an SEA injected into a nested guest must be routed to
 * the virtual EL2 instead of EL1, per the HCR_EL2.{TGE,TEA} and
 * HCRX_EL2.TMEA routing controls.
 */
static bool kvm_sea_target_is_el2(struct kvm_vcpu *vcpu)
{
	if (__vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_TGE | HCR_TEA))
		return true;

	if (!vcpu_mode_priv(vcpu))
		return false;

	/* TMEA routes aborts masked by PSTATE.A to EL2 */
	return (*vcpu_cpsr(vcpu) & PSR_A_BIT) &&
	       (__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA);
}

/*
 * kvm_inject_sea - inject a synchronous external abort into the guest
 * @vcpu: The VCPU to receive the abort
 * @iabt: instruction abort (true) vs data abort (false)
 * @addr: The faulting address to report
 *
 * Routes the abort to the guest hypervisor when nested-virt routing
 * demands it. Returns 1 so callers can use it as a handle_exit result.
 */
int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
{
	lockdep_assert_held(&vcpu->mutex);

	if (is_nested_ctxt(vcpu) && kvm_sea_target_is_el2(vcpu))
		return kvm_inject_nested_sea(vcpu, iabt, addr);

	__kvm_inject_sea(vcpu, iabt, addr);
	return 1;
}

/*
 * Forward an exclusive/atomic data abort to the guest hypervisor,
 * building the ESR (DABT from lower EL, exclusive/atomic FSC) by hand.
 */
static int kvm_inject_nested_excl_atomic(struct kvm_vcpu *vcpu, u64 addr)
{
	u64 esr = FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_DABT_LOW) |
		  FIELD_PREP(ESR_ELx_FSC, ESR_ELx_FSC_EXCL_ATOMIC) |
		  ESR_ELx_IL;

	vcpu_write_sys_reg(vcpu, addr, FAR_EL2);
	return kvm_inject_nested_sync(vcpu, esr);
}

/**
 * kvm_inject_dabt_excl_atomic - inject a data abort for unsupported exclusive
 * or atomic access
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
int kvm_inject_dabt_excl_atomic(struct kvm_vcpu *vcpu, u64 addr)
{
	u64 esr;

	if (is_nested_ctxt(vcpu) && (vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_VM))
		return kvm_inject_nested_excl_atomic(vcpu, addr);

	/*
	 * Inject a plain external data abort, then rewrite its FSC to
	 * the exclusive/atomic fault code.
	 */
	__kvm_inject_sea(vcpu, false, addr);
	esr = vcpu_read_sys_reg(vcpu, exception_esr_elx(vcpu));
	esr &= ~ESR_ELx_FSC;
	esr |= ESR_ELx_FSC_EXCL_ATOMIC;
	vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
	return 1;
}

/*
 * Report an IPA beyond the guest's configured PA range as an Address
 * Size Fault where the architecture lets us describe one.
 */
void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
{
	unsigned long addr, esr;

	/* Reconstruct the full faulting address from HPFAR + FAR bits */
	addr = kvm_vcpu_get_fault_ipa(vcpu);
	addr |= FAR_TO_FIPA_OFFSET(kvm_vcpu_get_hfar(vcpu));

	__kvm_inject_sea(vcpu, kvm_vcpu_trap_is_iabt(vcpu), addr);

	/*
	 * If AArch64 or LPAE, set FSC to 0 to indicate an Address
	 * Size Fault at level 0, as if exceeding PARange.
	 *
	 * Non-LPAE guests will only get the external abort, as there
	 * is no way to describe the ASF.
	 */
	if (vcpu_el1_is_32bit(vcpu) &&
	    !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
		return;

	esr = vcpu_read_sys_reg(vcpu, exception_esr_elx(vcpu));
	esr &= ~GENMASK_ULL(5, 0);
	vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
}

/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The vCPU in which to inject the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (vcpu_el1_is_32bit(vcpu))
		inject_undef32(vcpu);
	else
		inject_undef64(vcpu);
}

/*
 * SErrors are masked by PSTATE.A unless FEAT_DoubleFault2's
 * SCTLR2_ELx.NMEA makes them non-maskable.
 */
static bool serror_is_masked(struct kvm_vcpu *vcpu)
{
	return (*vcpu_cpsr(vcpu) & PSR_A_BIT) && !effective_sctlr2_nmea(vcpu);
}

/*
 * Decide whether an SError injected into a nested guest must be routed
 * to the virtual EL2 (hyp context, HCR_EL2.AMO, or HCRX_EL2.TMEA).
 */
static bool kvm_serror_target_is_el2(struct kvm_vcpu *vcpu)
{
	if (is_hyp_ctxt(vcpu) || vcpu_el2_amo_is_set(vcpu))
		return true;

	if (!(__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA))
		return false;

	/*
	 * In another example where FEAT_DoubleFault2 is entirely backwards,
	 * "masked" as it relates to the routing effects of HCRX_EL2.TMEA
	 * doesn't consider SCTLR2_EL1.NMEA. That is to say, even if EL1 asked
	 * for non-maskable SErrors, the EL2 bit takes priority if A is set.
	 */
	if (vcpu_mode_priv(vcpu))
		return *vcpu_cpsr(vcpu) & PSR_A_BIT;

	/*
	 * Otherwise, for EL0, the full masking rules apply: TMEA routes
	 * the SError to EL2 only when it is actually masked (i.e. NMEA
	 * is taken into account).
	 */
	return serror_is_masked(vcpu);
}

/*
 * With neither HCR_EL2.TGE nor HCR_EL2.AMO set, an SError cannot be
 * taken at EL2 and must be left pending instead.
 */
static bool kvm_serror_undeliverable_at_el2(struct kvm_vcpu *vcpu)
{
	return !(vcpu_el2_tge_is_set(vcpu) || vcpu_el2_amo_is_set(vcpu));
}

/*
 * kvm_inject_serror_esr - inject an SError with the given syndrome
 * @vcpu: The VCPU to receive the SError
 * @esr:  The syndrome value (ISS); the EC field is filled in here
 *
 * Returns 1 so callers can use it as a handle_exit result.
 */
int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr)
{
	lockdep_assert_held(&vcpu->mutex);

	if (is_nested_ctxt(vcpu) && kvm_serror_target_is_el2(vcpu))
		return kvm_inject_nested_serror(vcpu, esr);

	if (vcpu_is_el2(vcpu) && kvm_serror_undeliverable_at_el2(vcpu)) {
		vcpu_set_vsesr(vcpu, esr);
		vcpu_set_flag(vcpu, NESTED_SERROR_PENDING);
		return 1;
	}

	/*
	 * Emulate the exception entry if SErrors are unmasked. This is useful if
	 * the vCPU is in a nested context w/ vSErrors enabled then we've already
	 * delegated the hardware vSError context (i.e. HCR_EL2.VSE, VSESR_EL2,
	 * VDISR_EL2) to the guest hypervisor.
	 *
	 * As we're emulating the SError injection we need to explicitly populate
	 * ESR_ELx.EC because hardware will not do it on our behalf.
	 */
	if (!serror_is_masked(vcpu)) {
		pend_serror_exception(vcpu);
		esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SERROR);
		vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
		return 1;
	}

	/* Masked: fall back to the hardware vSError injection mechanism */
	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
	*vcpu_hcr(vcpu) |= HCR_VSE;
	return 1;
}